From 35d4aa7a53bc79890f681bcc082c3570a16f2979 Mon Sep 17 00:00:00 2001
From: Konstantin Schulz
Date: Wed, 27 May 2020 16:09:54 +0200
Subject: [PATCH] added frequency analysis to the OpenAPI specification
---
mc_backend/csm/app/api/__init__.py | 3 +-
.../csm/app/api/corpusStorageManagerAPI.py | 3 +-
mc_backend/csm/app/api/frequencyAPI.py | 39 +-
mc_backend/csm/csm_api.yaml | 31 +
mc_backend/mcserver/app/__init__.py | 13 +-
mc_backend/mcserver/app/api/__init__.py | 8 +-
mc_backend/mcserver/app/api/exerciseAPI.py | 5 +-
.../mcserver/app/api/exerciseListAPI.py | 79 +-
mc_backend/mcserver/app/api/fileAPI.py | 113 ++-
mc_backend/mcserver/app/api/frequencyAPI.py | 26 +-
.../mcserver/app/api/staticExercisesAPI.py | 9 +-
mc_backend/mcserver/app/models.py | 57 +-
.../app/services/annotationService.py | 28 +-
.../mcserver/app/services/corpusService.py | 27 +-
.../mcserver/app/services/fileService.py | 13 +-
.../mcserver/app/services/frequencyService.py | 89 ++-
.../mcserver/app/services/xmlService.py | 2 +-
mc_backend/mcserver/config.py | 11 +-
mc_backend/mcserver/mcserver_api.yaml | 725 ++++--------------
mc_backend/mcserver/migrations/env.py | 2 +-
mc_backend/mocks.py | 6 +-
.../controllers/default_controller.py | 78 +-
.../openapi/openapi_server/models/__init__.py | 6 +-
.../openapi_server/models/file_type.py | 44 ++
.../openapi_server/models/frequency_item.py | 14 +-
.../openapi_server/models/inline_object.py | 150 ++++
.../models/inline_response200.py | 308 +-------
.../openapi_server/models/phenomenon.py | 44 ++
.../openapi_server/models/static_exercise.py | 94 +++
.../openapi_server/openapi/openapi.yaml | 667 +++++++++-------
mc_backend/openapi_models.yaml | 600 +++++++++++++++
mc_backend/requirements.txt | 2 +
mc_backend/tests.py | 82 +-
mc_frontend/openapi/api/default.service.ts | 285 ++++++-
mc_frontend/openapi/model/fileType.ts | 25 +
mc_frontend/openapi/model/frequencyItem.ts | 5 +-
mc_frontend/openapi/model/inlineObject.ts | 33 +
.../openapi/model/inlineResponse200.ts | 24 +
mc_frontend/openapi/model/models.ts | 6 +-
mc_frontend/openapi/model/phenomenon.ts | 25 +
mc_frontend/openapi/model/staticExercise.ts | 27 +
mc_frontend/src/app/corpus.service.spec.ts | 27 +-
mc_frontend/src/app/corpus.service.ts | 34 +-
.../exercise-parameters.page.spec.ts | 17 +-
.../exercise-parameters.page.ts | 7 +-
mc_frontend/src/app/models/enum.ts | 11 +-
mc_frontend/src/app/models/mockMC.ts | 5 +-
mc_frontend/src/app/models/phenomenonMap.ts | 4 +-
mc_frontend/src/app/models/queryMC.ts | 3 +-
.../src/app/preview/preview.page.spec.ts | 2 +-
mc_frontend/src/app/preview/preview.page.ts | 16 +-
51 files changed, 2350 insertions(+), 1584 deletions(-)
create mode 100644 mc_backend/csm/csm_api.yaml
create mode 100644 mc_backend/openapi/openapi_server/models/file_type.py
create mode 100644 mc_backend/openapi/openapi_server/models/inline_object.py
create mode 100644 mc_backend/openapi/openapi_server/models/phenomenon.py
create mode 100644 mc_backend/openapi/openapi_server/models/static_exercise.py
create mode 100644 mc_backend/openapi_models.yaml
create mode 100644 mc_frontend/openapi/model/fileType.ts
create mode 100644 mc_frontend/openapi/model/inlineObject.ts
create mode 100644 mc_frontend/openapi/model/inlineResponse200.ts
create mode 100644 mc_frontend/openapi/model/phenomenon.ts
create mode 100644 mc_frontend/openapi/model/staticExercise.ts
diff --git a/mc_backend/csm/app/api/__init__.py b/mc_backend/csm/app/api/__init__.py
index b84b660..454d133 100644
--- a/mc_backend/csm/app/api/__init__.py
+++ b/mc_backend/csm/app/api/__init__.py
@@ -7,14 +7,13 @@ from mcserver import Config
bp = Blueprint("api", __name__)
api = Api(bp)
+from . import frequencyAPI
from csm.app.api.annisFindAPI import AnnisFindAPI
from csm.app.api.corpusStorageManagerAPI import CorpusStorageManagerAPI
-from csm.app.api.frequencyAPI import FrequencyAPI
from csm.app.api.subgraphAPI import SubgraphAPI
from csm.app.api.textcomplexityAPI import TextComplexityAPI
api.add_resource(AnnisFindAPI, Config.SERVER_URI_ANNIS_FIND, endpoint="find")
api.add_resource(CorpusStorageManagerAPI, Config.SERVER_URI_CSM, endpoint="csm")
-api.add_resource(FrequencyAPI, Config.SERVER_URI_FREQUENCY, endpoint="frequency")
api.add_resource(SubgraphAPI, Config.SERVER_URI_CSM_SUBGRAPH, endpoint="subgraph")
api.add_resource(TextComplexityAPI, Config.SERVER_URI_TEXT_COMPLEXITY, endpoint='textcomplexity')
diff --git a/mc_backend/csm/app/api/corpusStorageManagerAPI.py b/mc_backend/csm/app/api/corpusStorageManagerAPI.py
index 678cc84..4200af2 100644
--- a/mc_backend/csm/app/api/corpusStorageManagerAPI.py
+++ b/mc_backend/csm/app/api/corpusStorageManagerAPI.py
@@ -51,7 +51,8 @@ class CorpusStorageManagerAPI(Resource):
annotations_or_urn: str = args["annotations"]
aqls: List[str] = args["aqls"]
exercise_type: ExerciseType = ExerciseType[args["exercise_type"]]
- search_phenomena: List[Phenomenon] = [Phenomenon[x] for x in args["search_phenomena"]]
+ search_phenomena: List[Phenomenon] = [Phenomenon().__getattribute__(x.upper()) for x in
+ args["search_phenomena"]]
conll: List[TokenList] = CorpusService.get_annotations_from_string(annotations_or_urn)
ret_val: dict = CorpusService.process_corpus_data(title, conll, aqls, exercise_type, search_phenomena)
# serialize the results to json
diff --git a/mc_backend/csm/app/api/frequencyAPI.py b/mc_backend/csm/app/api/frequencyAPI.py
index 070e143..e7fcf7a 100644
--- a/mc_backend/csm/app/api/frequencyAPI.py
+++ b/mc_backend/csm/app/api/frequencyAPI.py
@@ -1,30 +1,17 @@
from typing import List, Dict, Set
-import flask
-from flask_restful import Resource
-from flask_restful.reqparse import RequestParser
-
-from mcserver.app.models import FrequencyAnalysis, Phenomenon
+from mcserver.app.models import Phenomenon, FrequencyItem
from mcserver.app.services import NetworkService, CorpusService, AnnotationService
-class FrequencyAPI(Resource):
- def __init__(self):
- self.reqparse: RequestParser = NetworkService.base_request_parser.copy()
- self.reqparse.add_argument("urn", type=str, required=True, default="", location="form", help="No URN provided")
- super(FrequencyAPI, self).__init__()
-
- def get(self):
- """ Returns results for a frequency query from ANNIS for a given CTS URN and AQL. """
- # get request arguments
- args: dict = flask.request.args
- urn: str = args["urn"]
- fa: FrequencyAnalysis = CorpusService.get_frequency_analysis(urn, is_csm=True)
- # map the abbreviated values found by ANNIS to our own model
- skip_set: Set[Phenomenon] = {Phenomenon.lemma, Phenomenon.dependency}
- for fi in fa:
- for i in range(len(fi.values)):
- if fi.phenomena[i] in skip_set:
- continue
- value_map: Dict[str, List[str]] = AnnotationService.phenomenon_map[fi.phenomena[i]]
- fi.values[i] = next((x for x in value_map if fi.values[i] in value_map[x]), None)
- return NetworkService.make_json_response(fa.serialize())
+def get(urn: str):
+ """ Returns results for a frequency query from ANNIS for a given CTS URN and AQL. """
+ fa: List[FrequencyItem] = CorpusService.get_frequency_analysis(urn, is_csm=True)
+ # map the abbreviated values found by ANNIS to our own model
+ skip_set: Set[Phenomenon] = {Phenomenon.LEMMA, Phenomenon.DEPENDENCY}
+ for fi in fa:
+ for i in range(len(fi.values)):
+ if fi.phenomena[i] in skip_set:
+ continue
+ value_map: Dict[str, List[str]] = AnnotationService.phenomenon_map[fi.phenomena[i]]
+ fi.values[i] = next((x for x in value_map if fi.values[i] in value_map[x]), None)
+ return NetworkService.make_json_response([x.to_dict() for x in fa])
diff --git a/mc_backend/csm/csm_api.yaml b/mc_backend/csm/csm_api.yaml
new file mode 100644
index 0000000..7100823
--- /dev/null
+++ b/mc_backend/csm/csm_api.yaml
@@ -0,0 +1,31 @@
+openapi: "3.0.0"
+
+info:
+ title: Machina Callida Backend REST API (Corpus Storage Manager)
+ version: "1.0"
+servers:
+ - url: http://localhost:6555/mc/api/v1.0
+
+paths:
+ /frequency:
+ get:
+ summary: Returns results for a frequency query from ANNIS for a given CTS URN.
+ operationId: csm.app.api.frequencyAPI.get
+ responses:
+ 200:
+ description: Frequency analysis, i.e. a list of frequency items.
+ content:
+ application/json:
+ schema:
+ type: array
+ description: List of items with frequency data for linguistic phenomena.
+ items:
+ $ref: "../openapi_models.yaml#/components/schemas/FrequencyItem"
+ parameters:
+ - name: urn
+ in: query
+ description: CTS URN for referencing the corpus.
+ required: true
+ schema:
+ type: string
+ example: urn:cts:latinLit:phi1254.phi001.perseus-lat2:5.6.21-5.6.21
diff --git a/mc_backend/mcserver/app/__init__.py b/mc_backend/mcserver/app/__init__.py
index 667c836..e6c6e17 100644
--- a/mc_backend/mcserver/app/__init__.py
+++ b/mc_backend/mcserver/app/__init__.py
@@ -3,12 +3,14 @@ import logging
import os
import sys
from logging.handlers import RotatingFileHandler
+from pathlib import Path
from threading import Thread
from time import strftime
from typing import Type
import connexion
import flask
import open_alchemy
+import prance
from connexion import FlaskApp
from flask import Flask, got_request_exception, request, Response, send_from_directory
from flask_cors import CORS
@@ -21,7 +23,7 @@ db: SQLAlchemy = SQLAlchemy() # session_options={"autocommit": True}
migrate: Migrate = Migrate(directory=Config.MIGRATIONS_DIRECTORY)
if not hasattr(open_alchemy.models, Config.DATABASE_TABLE_CORPUS):
# do this _BEFORE_ you add any APIs to your application
- init_yaml(Config.API_SPEC_YAML_FILE_PATH, base=db.Model,
+ init_yaml(Config.API_SPEC_MODELS_YAML_FILE_PATH, base=db.Model,
models_filename=os.path.join(Config.MC_SERVER_DIRECTORY, "models_auto.py"))
@@ -76,10 +78,13 @@ def full_init(app: Flask, cfg: Type[Config] = Config) -> None:
def init_app_common(cfg: Type[Config] = Config, is_csm: bool = False) -> Flask:
""" Initializes common Flask parts, e.g. CORS, configuration, database, migrations and custom corpora."""
+ spec_dir: str = Config.CSM_DIRECTORY if is_csm else Config.MC_SERVER_DIRECTORY
connexion_app: FlaskApp = connexion.FlaskApp(
- __name__, port=(cfg.CORPUS_STORAGE_MANAGER_PORT if is_csm else cfg.HOST_PORT),
- specification_dir=Config.MC_SERVER_DIRECTORY)
- connexion_app.add_api(Config.API_SPEC_YAML_FILE_PATH, arguments={'title': 'Machina Callida Backend REST API'})
+ __name__, port=(cfg.CORPUS_STORAGE_MANAGER_PORT if is_csm else cfg.HOST_PORT), specification_dir=spec_dir)
+ spec_path: str = Config.API_SPEC_CSM_FILE_PATH if is_csm else Config.API_SPEC_MCSERVER_FILE_PATH
+ parser = prance.ResolvingParser(spec_path, lazy=True, strict=False) # str(Path(spec_path).absolute())
+ parser.parse()
+ connexion_app.add_api(parser.specification)
apply_event_handlers(connexion_app)
app: Flask = connexion_app.app
# allow CORS requests for all API routes
diff --git a/mc_backend/mcserver/app/api/__init__.py b/mc_backend/mcserver/app/api/__init__.py
index 84c2c04..73ea9a8 100644
--- a/mc_backend/mcserver/app/api/__init__.py
+++ b/mc_backend/mcserver/app/api/__init__.py
@@ -6,10 +6,7 @@ from mcserver import Config
bp = Blueprint("api", __name__)
api = Api(bp)
-from . import corpusAPI, corpusListAPI, exerciseAPI, staticExercisesAPI
-from mcserver.app.api.exerciseListAPI import ExerciseListAPI
-from mcserver.app.api.fileAPI import FileAPI
-from mcserver.app.api.frequencyAPI import FrequencyAPI
+from . import corpusAPI, corpusListAPI, exerciseAPI, exerciseListAPI, fileAPI, frequencyAPI, staticExercisesAPI
from mcserver.app.api.h5pAPI import H5pAPI
from mcserver.app.api.kwicAPI import KwicAPI
from mcserver.app.api.rawTextAPI import RawTextAPI
@@ -18,9 +15,6 @@ from mcserver.app.api.validReffAPI import ValidReffAPI
from mcserver.app.api.vectorNetworkAPI import VectorNetworkAPI
from mcserver.app.api.vocabularyAPI import VocabularyAPI
-api.add_resource(ExerciseListAPI, Config.SERVER_URI_EXERCISE_LIST, endpoint="exerciseList")
-api.add_resource(FileAPI, Config.SERVER_URI_FILE, endpoint="file")
-api.add_resource(FrequencyAPI, Config.SERVER_URI_FREQUENCY, endpoint="frequency")
api.add_resource(H5pAPI, Config.SERVER_URI_H5P, endpoint="h5p")
api.add_resource(KwicAPI, Config.SERVER_URI_KWIC, endpoint="kwic")
api.add_resource(RawTextAPI, Config.SERVER_URI_RAW_TEXT, endpoint="rawtext")
diff --git a/mc_backend/mcserver/app/api/exerciseAPI.py b/mc_backend/mcserver/app/api/exerciseAPI.py
index 21d15fe..8b96592 100644
--- a/mc_backend/mcserver/app/api/exerciseAPI.py
+++ b/mc_backend/mcserver/app/api/exerciseAPI.py
@@ -49,7 +49,7 @@ def get_graph_data(title: str, conll_string_or_urn: str, aqls: List[str], exerci
url: str = f"{Config.INTERNET_PROTOCOL}{Config.HOST_IP_CSM}:{Config.CORPUS_STORAGE_MANAGER_PORT}"
data: str = json.dumps(
dict(title=title, annotations=conll_string_or_urn, aqls=aqls, exercise_type=exercise_type.name,
- search_phenomena=[x.name for x in search_phenomena]))
+ search_phenomena=search_phenomena))
response: requests.Response = requests.post(url, data=data)
try:
return json.loads(response.text)
@@ -117,7 +117,8 @@ def post(exercise_data: dict) -> Union[Response, ConnexionResponse]:
search_values_list: List[str] = json.loads(exercise_data["search_values"])
aqls: List[str] = AnnotationService.map_search_values_to_aql(search_values_list=search_values_list,
exercise_type=exercise_type)
- search_phenomena: List[Phenomenon] = [Phenomenon[x.split("=")[0]] for x in search_values_list]
+ search_phenomena: List[Phenomenon] = [Phenomenon().__getattribute__(x.split("=")[0].upper()) for x in
+ search_values_list]
urn: str = exercise_data.get("urn", "")
# if there is custom text instead of a URN, immediately annotate it
conll_string_or_urn: str = urn if CorpusService.is_urn(urn) else AnnotationService.get_udpipe(
diff --git a/mc_backend/mcserver/app/api/exerciseListAPI.py b/mc_backend/mcserver/app/api/exerciseListAPI.py
index bb7a788..f06ecd6 100644
--- a/mc_backend/mcserver/app/api/exerciseListAPI.py
+++ b/mc_backend/mcserver/app/api/exerciseListAPI.py
@@ -3,58 +3,39 @@ from typing import List, Set
import conllu
from conllu import TokenList
-from flask_restful import Resource
-from flask_restful.reqparse import RequestParser
-
from mcserver.app import db
from mcserver.app.models import Language, VocabularyCorpus, ResourceType
from mcserver.app.services import NetworkService, FileService
from mcserver.models_auto import Exercise, UpdateInfo
-class ExerciseListAPI(Resource):
- """The exercise list API resource. It enables some of the CRUD operations for the exercises from the database."""
-
- def __init__(self):
- """Initialize possible arguments for calls to the exercise list REST API."""
- self.reqparse: RequestParser = NetworkService.base_request_parser.copy()
- self.reqparse.add_argument("lang", type=str, required=True, help="No language specified")
- self.reqparse.add_argument("last_update_time", type=int, required=False, default=0,
- help="No milliseconds time for last update provided")
- self.reqparse.add_argument("vocabulary", type=str, required=False, help="No reference vocabulary provided")
- self.reqparse.add_argument("frequency_upper_bound", type=int, required=False,
- help="No upper bound for reference vocabulary frequency provided")
- super(ExerciseListAPI, self).__init__()
-
- def get(self):
- """The GET method for the exercise list REST API. It provides metadata for all available exercises."""
- args: dict = self.reqparse.parse_args()
- vocabulary_set: Set[str]
- last_update: int = args["last_update_time"]
- ui_exercises: UpdateInfo = db.session.query(UpdateInfo).filter_by(
- resource_type=ResourceType.exercise_list.name).first()
- db.session.commit()
- if ui_exercises.last_modified_time < last_update / 1000:
- return NetworkService.make_json_response([])
- try:
- vc: VocabularyCorpus = VocabularyCorpus[args["vocabulary"]]
- vocabulary_set = FileService.get_vocabulary_set(vc, args["frequency_upper_bound"])
- except KeyError:
- vocabulary_set = set()
- lang: Language
- try:
- lang = Language(args["lang"])
- except ValueError:
- lang = Language.English
- exercises: List[Exercise] = db.session.query(Exercise).filter_by(language=lang.value)
- db.session.commit()
- ret_val: List[dict] = [NetworkService.serialize_exercise(x, compress=True) for x in exercises]
- matching_degrees: List[float] = []
- if len(vocabulary_set):
- for exercise in exercises:
- conll: List[TokenList] = conllu.parse(exercise.conll)
- lemmata: List[str] = [tok["lemma"] for sent in conll for tok in sent.tokens]
- matching_degrees.append(sum((1 if x in vocabulary_set else 0) for x in lemmata) / len(lemmata) * 100)
- for i in range(len(ret_val)):
- ret_val[i]["matching_degree"] = matching_degrees[i]
- return NetworkService.make_json_response(ret_val)
+def get(lang: str, frequency_upper_bound: int, last_update_time: int, vocabulary: str = ""):
+ """The GET method for the exercise list REST API. It provides metadata for all available exercises."""
+ vocabulary_set: Set[str]
+ ui_exercises: UpdateInfo = db.session.query(UpdateInfo).filter_by(
+ resource_type=ResourceType.exercise_list.name).first()
+ db.session.commit()
+ if ui_exercises.last_modified_time < last_update_time / 1000:
+ return NetworkService.make_json_response([])
+ try:
+ vc: VocabularyCorpus = VocabularyCorpus[vocabulary]
+ vocabulary_set = FileService.get_vocabulary_set(vc, frequency_upper_bound)
+ except KeyError:
+ vocabulary_set = set()
+ lang: Language
+ try:
+ lang = Language(lang)
+ except ValueError:
+ lang = Language.English
+ exercises: List[Exercise] = db.session.query(Exercise).filter_by(language=lang.value)
+ db.session.commit()
+ ret_val: List[dict] = [NetworkService.serialize_exercise(x, compress=True) for x in exercises]
+ matching_degrees: List[float] = []
+ if len(vocabulary_set):
+ for exercise in exercises:
+ conll: List[TokenList] = conllu.parse(exercise.conll)
+ lemmata: List[str] = [tok["lemma"] for sent in conll for tok in sent.tokens]
+ matching_degrees.append(sum((1 if x in vocabulary_set else 0) for x in lemmata) / len(lemmata) * 100)
+ for i in range(len(ret_val)):
+ ret_val[i]["matching_degree"] = matching_degrees[i]
+ return NetworkService.make_json_response(ret_val)
diff --git a/mc_backend/mcserver/app/api/fileAPI.py b/mc_backend/mcserver/app/api/fileAPI.py
index 3df7399..fea6907 100644
--- a/mc_backend/mcserver/app/api/fileAPI.py
+++ b/mc_backend/mcserver/app/api/fileAPI.py
@@ -5,8 +5,10 @@ import uuid
from datetime import datetime
from typing import List, Union
+import connexion
import flask
-from flask import send_from_directory
+from connexion.lifecycle import ConnexionResponse
+from flask import send_from_directory, Response
from flask_restful import Resource, abort
from flask_restful.reqparse import RequestParser
from werkzeug.wrappers import ETagResponseMixin
@@ -18,71 +20,6 @@ from mcserver.config import Config
from mcserver.models_auto import Exercise, UpdateInfo, LearningResult
-class FileAPI(Resource):
- """The file API resource. It allows users to download files that are stored as strings in the database."""
-
- def __init__(self):
- """Initialize possible arguments for calls to the file REST API."""
- self.reqparse: RequestParser = NetworkService.base_request_parser.copy()
- self.reqparse.add_argument("id", type=str, required=False, location="args",
- help="No exercise ID or URN provided")
- self.reqparse.add_argument("type", type=str, required=False, location="args", help="No file type provided")
- self.reqparse.add_argument("solution_indices", type=str, required=False, location="args",
- help="No solution IDs provided")
- self.reqparse.add_argument("learning_result", type=str, required=False, location="form",
- help="No learning result provided")
- self.reqparse.add_argument("html_content", type=str, required=False, location="form",
- help="No HTML content provided")
- self.reqparse.add_argument("file_type", type=str, required=False, location="form",
- help="No file type provided")
- self.reqparse.add_argument("urn", type=str, required=False, location="form", help="No URN provided")
- super(FileAPI, self).__init__()
-
- def get(self) -> ETagResponseMixin:
- """The GET method for the file REST API. It provides the URL to download a specific file."""
- clean_tmp_folder()
- args = self.reqparse.parse_args()
- eid: str = args["id"]
- exercise: Exercise = db.session.query(Exercise).filter_by(eid=eid).first()
- db.session.commit()
- file_type: FileType = FileType[args["type"]]
- file_name: str = eid + "." + file_type.value
- mime_type: str = MimeType[file_type.value].value
- if exercise is None:
- # try and see if a file is already cached on disk
- if not os.path.exists(os.path.join(Config.TMP_DIRECTORY, file_name)):
- abort(404)
- return send_from_directory(Config.TMP_DIRECTORY, file_name, mimetype=mime_type, as_attachment=True)
- exercise.last_access_time = datetime.utcnow().timestamp()
- db.session.commit()
- solution_indices: List[int] = json.loads(args["solution_indices"] if args["solution_indices"] else "null")
- if solution_indices is not None:
- file_name = eid + "-" + str(uuid.uuid4()) + "." + file_type.value
- existing_file: DownloadableFile = next(
- (x for x in FileService.downloadable_files if x.id + "." + x.file_type.value == file_name), None)
- if existing_file is None:
- existing_file = FileService.make_tmp_file_from_exercise(file_type, exercise, solution_indices)
- return send_from_directory(Config.TMP_DIRECTORY, existing_file.file_name, mimetype=mime_type,
- as_attachment=True)
-
- def post(self) -> Union[None, ETagResponseMixin]:
- """ The POST method for the File REST API.
-
- It writes learning results or HTML content to the disk for later access. """
- form_data: dict = flask.request.form
- lr_string: str = form_data.get("learning_result", None)
- if lr_string:
- lr_dict: dict = json.loads(lr_string)
- for exercise_id in lr_dict:
- xapi_statement: XapiStatement = XapiStatement(lr_dict[exercise_id])
- save_learning_result(xapi_statement)
- else:
- file_type: FileType = FileType[form_data["file_type"]]
- existing_file: DownloadableFile = FileService.make_tmp_file_from_html(form_data["urn"], file_type,
- form_data["html_content"])
- return NetworkService.make_json_response(existing_file.file_name)
-
-
def clean_tmp_folder():
""" Cleans the files directory regularly. """
ui_file: UpdateInfo = db.session.query(UpdateInfo).filter_by(resource_type=ResourceType.file_api_clean.name).first()
@@ -91,7 +28,7 @@ def clean_tmp_folder():
for file in [x for x in os.listdir(Config.TMP_DIRECTORY) if x not in ".gitignore"]:
file_to_delete_type: str = os.path.splitext(file)[1].replace(".", "")
file_to_delete: DownloadableFile = next((x for x in FileService.downloadable_files if
- x.file_name == file and x.file_type.value == file_to_delete_type),
+ x.file_name == file and x.file_type == file_to_delete_type),
None)
if file_to_delete is not None:
FileService.downloadable_files.remove(file_to_delete)
@@ -100,6 +37,48 @@ def clean_tmp_folder():
db.session.commit()
+def get(id: str, type: FileType, solution_indices: List[int]) -> Union[ETagResponseMixin, ConnexionResponse]:
+ """The GET method for the file REST API. It provides the URL to download a specific file."""
+ clean_tmp_folder()
+ exercise: Exercise = db.session.query(Exercise).filter_by(eid=id).first()
+ db.session.commit()
+ file_name: str = id + "." + str(type)
+ mime_type: str = MimeType[type].value
+ if exercise is None:
+ # try and see if a file is already cached on disk
+ if not os.path.exists(os.path.join(Config.TMP_DIRECTORY, file_name)):
+ return connexion.problem(404, Config.ERROR_TITLE_NOT_FOUND, Config.ERROR_MESSAGE_EXERCISE_NOT_FOUND)
+ return send_from_directory(Config.TMP_DIRECTORY, file_name, mimetype=mime_type, as_attachment=True)
+ exercise.last_access_time = datetime.utcnow().timestamp()
+ db.session.commit()
+ if solution_indices:
+ file_name = id + "-" + str(uuid.uuid4()) + "." + str(type)
+ existing_file: DownloadableFile = next(
+ (x for x in FileService.downloadable_files if x.id + "." + str(x.file_type) == file_name), None)
+ if existing_file is None:
+ existing_file = FileService.make_tmp_file_from_exercise(type, exercise, solution_indices)
+ return send_from_directory(Config.TMP_DIRECTORY, existing_file.file_name, mimetype=mime_type,
+ as_attachment=True)
+
+
+def post(file_data: dict) -> Response:
+ """ The POST method for the File REST API.
+
+ It writes learning results or HTML content to the disk for later access. """
+ lr_string: str = file_data.get("learning_result", None)
+ if lr_string:
+ lr_dict: dict = json.loads(lr_string)
+ for exercise_id in lr_dict:
+ xapi_statement: XapiStatement = XapiStatement(lr_dict[exercise_id])
+ save_learning_result(xapi_statement)
+ return NetworkService.make_json_response(str(True))
+ else:
+ file_type: FileType = file_data["file_type"]
+ existing_file: DownloadableFile = FileService.make_tmp_file_from_html(file_data["urn"], file_type,
+ file_data["html_content"])
+ return NetworkService.make_json_response(existing_file.file_name)
+
+
def save_learning_result(xapi_statement: XapiStatement) -> LearningResult:
"""Creates a new Learning Result from a XAPI Statement and saves it to the database."""
learning_result: LearningResult = LearningResultMC.from_dict(
diff --git a/mc_backend/mcserver/app/api/frequencyAPI.py b/mc_backend/mcserver/app/api/frequencyAPI.py
index fb3ce42..a9dd4de 100644
--- a/mc_backend/mcserver/app/api/frequencyAPI.py
+++ b/mc_backend/mcserver/app/api/frequencyAPI.py
@@ -1,26 +1,12 @@
-import flask
import requests
-from flask_restful import Resource
import rapidjson as json
-from flask_restful.reqparse import RequestParser
-
from mcserver import Config
from mcserver.app.services import NetworkService
-class FrequencyAPI(Resource):
- def __init__(self):
- # TODO: FIX THE REQUEST PARSING FOR ALL APIs
- self.reqparse: RequestParser = NetworkService.base_request_parser.copy()
- self.reqparse.add_argument("urn", type=str, required=True, default="", location="form", help="No URN provided")
- super(FrequencyAPI, self).__init__()
-
- def get(self):
- """ Returns results for a frequency query from ANNIS for a given CTS URN and AQL. """
- # get request arguments
- args: dict = flask.request.args
- urn: str = args["urn"]
- url: str = f"{Config.INTERNET_PROTOCOL}{Config.HOST_IP_CSM}:{Config.CORPUS_STORAGE_MANAGER_PORT}" + \
- Config.SERVER_URI_FREQUENCY
- response: requests.Response = requests.get(url, params=dict(urn=urn))
- return NetworkService.make_json_response(json.loads(response.text))
+def get(urn: str):
+ """ Returns results for a frequency query from ANNIS for a given CTS URN and AQL. """
+ url: str = f"{Config.INTERNET_PROTOCOL}{Config.HOST_IP_CSM}:{Config.CORPUS_STORAGE_MANAGER_PORT}" + \
+ Config.SERVER_URI_FREQUENCY
+ response: requests.Response = requests.get(url, params=dict(urn=urn))
+ return NetworkService.make_json_response(json.loads(response.text))
diff --git a/mc_backend/mcserver/app/api/staticExercisesAPI.py b/mc_backend/mcserver/app/api/staticExercisesAPI.py
index 5b9aec8..c9e865c 100644
--- a/mc_backend/mcserver/app/api/staticExercisesAPI.py
+++ b/mc_backend/mcserver/app/api/staticExercisesAPI.py
@@ -27,7 +27,8 @@ def get() -> Union[Response, ConnexionResponse]:
if datetime.fromtimestamp(time() - Config.INTERVAL_STATIC_EXERCISES) > NetworkService.exercises_last_update \
or len(NetworkService.exercises) == 0:
return update_exercises()
- return NetworkService.make_json_response({k: v.__dict__ for (k, v) in NetworkService.exercises.items()})
+ return NetworkService.make_json_response(
+ {x: NetworkService.exercises[x].to_dict() for x in NetworkService.exercises})
def get_relevant_strings(response: Response):
@@ -136,11 +137,13 @@ def update_exercises() -> Union[Response, ConnexionResponse]:
search_results_dict: Dict[str, int] = {item[0]: i for (i, item) in enumerate(search_results)}
for url in relevant_strings_dict:
# the URN points to Cicero's letters to his brother Quintus, 1.1.8-1.1.10
- NetworkService.exercises[url] = StaticExercise(urn="urn:cts:latinLit:phi0474.phi058.perseus-lat1:1.1.8-1.1.10")
+ NetworkService.exercises[url] = StaticExercise(
+ solutions=[], urn="urn:cts:latinLit:phi0474.phi058.perseus-lat1:1.1.8-1.1.10")
for word in relevant_strings_dict[url]:
# UDpipe cannot handle name abbreviations, so remove the punctuation and only keep the upper case letter
if word[-1] in string.punctuation:
word = word[:-1]
NetworkService.exercises[url].solutions.append(list(search_results[search_results_dict[word]]))
NetworkService.exercises_last_update = datetime.fromtimestamp(time())
- return NetworkService.make_json_response({k: v.__dict__ for (k, v) in NetworkService.exercises.items()})
+ return NetworkService.make_json_response(
+ {x: NetworkService.exercises[x].to_dict() for x in NetworkService.exercises})
diff --git a/mc_backend/mcserver/app/models.py b/mc_backend/mcserver/app/models.py
index b3cc0f9..50f78d4 100644
--- a/mc_backend/mcserver/app/models.py
+++ b/mc_backend/mcserver/app/models.py
@@ -1,17 +1,21 @@
"""Models for dealing with text data, both in the database and in the application itself."""
-from typing import Dict, List, Union, Any
+from typing import Dict, List
from enum import Enum
import typing
from mcserver.config import Config
from mcserver.models_auto import TExercise, Corpus, TCorpus, Exercise, TLearningResult, LearningResult
from openapi.openapi_server.models import SolutionElement, Solution, Link, NodeMC, TextComplexity, AnnisResponse, \
- GraphData
+ GraphData, StaticExercise, FileType, FrequencyItem, Phenomenon
AnnisResponse = AnnisResponse
+FileType = FileType
+FrequencyItem = FrequencyItem
GraphData = GraphData
LinkMC = Link
NodeMC = NodeMC
+Phenomenon = Phenomenon
SolutionElement = SolutionElement
+StaticExercise = StaticExercise
TextComplexity = TextComplexity
@@ -74,7 +78,8 @@ class Dependency(Enum):
punctuation = 26
root = 27
subject = 28
- vocative = 29
+ unspecified = 29
+ vocative = 30
class ExerciseType(Enum):
@@ -84,11 +89,8 @@ class ExerciseType(Enum):
matching = "matching"
-class FileType(Enum):
- docx = "docx"
- json = "json"
- pdf = "pdf"
- xml = "xml"
+class Feats(Enum):
+ Case = "case"
class Language(Enum):
@@ -129,13 +131,6 @@ class PartOfSpeech(Enum):
verb = 15
-class Phenomenon(Enum):
- case = "feats"
- dependency = "dependency"
- lemma = "lemma"
- partOfSpeech = "upostag"
-
-
class ResourceType(Enum):
"""Resource types for the UpdateInfo table in the database.
@@ -466,35 +461,3 @@ class Sentence:
def __init__(self, id: int, matching_degree: int):
self.id = id
self.matching_degree = matching_degree
-
-
-class StaticExercise:
- def __init__(self, solutions: List[List[str]] = None, urn: str = ""):
- self.solutions = [] if solutions is None else solutions
- self.urn = urn
-
-
-class FrequencyItem:
-
- def __init__(self, values: List[str], phenomena: List[Phenomenon], count: Union[int, Any]):
- self.values = values
- self.phenomena = phenomena
- self.count = count
-
- def serialize(self) -> dict:
- ret_val: dict = self.__dict__
- ret_val["phenomena"] = [x.name for x in self.phenomena]
- return ret_val
-
-
-class FrequencyAnalysis(List[FrequencyItem]):
-
- def __init__(self, json_list: list = None):
- if json_list:
- for x in json_list:
- self.append(FrequencyItem(x["values"], [Phenomenon[y] for y in x["phenomena"]], x["count"]))
- else:
- super(FrequencyAnalysis).__init__()
-
- def serialize(self) -> List[dict]:
- return [x.serialize() for x in self]
diff --git a/mc_backend/mcserver/app/services/annotationService.py b/mc_backend/mcserver/app/services/annotationService.py
index 34ad920..58939c3 100644
--- a/mc_backend/mcserver/app/services/annotationService.py
+++ b/mc_backend/mcserver/app/services/annotationService.py
@@ -1,6 +1,5 @@
import os
import subprocess
-from enum import Enum
from sys import platform
from tempfile import mkstemp
from typing import Dict, List, Set, Tuple
@@ -20,8 +19,8 @@ class AnnotationService:
"""Service for adding annotations to raw texts."""
excluded_annotations_set: Set[str] = {'form', 'id', 'head', Config.AQL_DEPREL}
- phenomenon_map: Dict[Enum, Dict[str, List[str]]] = {
- Phenomenon.case: {
+ phenomenon_map: Dict[Phenomenon, Dict[str, List[str]]] = {
+ Phenomenon.FEATS: {
Case.ablative.name: ["Abl"],
Case.accusative.name: ["Acc"],
Case.dative.name: ["Dat"],
@@ -30,7 +29,7 @@ class AnnotationService:
Case.nominative.name: ["Nom"],
Case.vocative.name: ["Voc"],
},
- Phenomenon.partOfSpeech: {
+ Phenomenon.UPOSTAG: {
PartOfSpeech.adjective.name: ["ADJ"],
PartOfSpeech.adverb.name: ["ADV"],
PartOfSpeech.auxiliary.name: ["AUX"],
@@ -47,11 +46,11 @@ class AnnotationService:
PartOfSpeech.symbol.name: ["SYM"],
PartOfSpeech.verb.name: ["VERB"],
},
- Phenomenon.dependency: {
+ Phenomenon.DEPENDENCY: {
Dependency.adjectivalClause.name: ["acl"],
Dependency.adjectivalModifier.name: ["amod"],
Dependency.adverbialClauseModifier.name: ["advcl"],
- Dependency.adverbialModifier.name: ["advmod"],
+ Dependency.adverbialModifier.name: ["advmod", "advmod:emph"],
Dependency.appositionalModifier.name: ["appos"],
Dependency.auxiliary.name: ["aux", "aux:pass"],
Dependency.caseMarking.name: ["case"],
@@ -77,9 +76,10 @@ class AnnotationService:
Dependency.punctuation.name: ["punct"],
Dependency.root.name: ["root"],
Dependency.subject.name: ["nsubj", "nsubj:pass", "csubj", "csubj:pass"],
+ Dependency.unspecified.name: ["dep"],
Dependency.vocative.name: ["vocative"]
},
- Phenomenon.lemma: {}}
+ Phenomenon.LEMMA: {}}
@staticmethod
def add_urn_to_sentences(text_list: List[Tuple[str, str]], annotations: List[TokenList]) -> None:
@@ -263,28 +263,28 @@ class AnnotationService:
aql_parts: List[str] = []
for i in range(len(search_values_list)):
search_parts: List[str] = search_values_list[i].split("=")
- phenomenon: Phenomenon = Phenomenon[search_parts[0]]
+ phenomenon: Phenomenon = Phenomenon().__getattribute__(search_parts[0].upper())
raw_values: List[str] = search_parts[1].split("|")
aql_base: str
- if phenomenon == Phenomenon.dependency:
+ if phenomenon == Phenomenon.DEPENDENCY:
aql_base = f'node {Config.AQL_DEP}[{Config.AQL_DEPREL}=' + "{0}] node"
else:
- aql_base = phenomenon.value + '={0}'
- if phenomenon == Phenomenon.lemma:
+ aql_base = str(phenomenon) + '={0}'
+ if phenomenon == Phenomenon.LEMMA:
for rv in raw_values:
# need to prepare the mapping dynamically, so we can handle the following steps in a uniform way
AnnotationService.phenomenon_map[phenomenon][rv] = [rv]
for rv in raw_values:
translated_values: List[str] = AnnotationService.phenomenon_map[phenomenon][rv]
aql_part: str
- if phenomenon == Phenomenon.case:
+ if phenomenon == Phenomenon.FEATS:
aql_part = aql_base.format('/.*Case={0}.*/'.format(translated_values[0]))
else:
aql_part = aql_base.format(f'"{translated_values[0]}"' if len(
translated_values) == 1 else f"/({'|'.join(translated_values)})/")
- if AnnotationService.phenomenon_map[Phenomenon.dependency][Dependency.root.name][0] in aql_part:
+ if AnnotationService.phenomenon_map[Phenomenon.DEPENDENCY][Dependency.root.name][0] in aql_part:
aql_part = 'deps="{0}"'.format(
- AnnotationService.phenomenon_map[Phenomenon.dependency][Dependency.root.name][0])
+ AnnotationService.phenomenon_map[Phenomenon.DEPENDENCY][Dependency.root.name][0])
aql_parts.append(aql_part)
if exercise_type == ExerciseType.matching:
final_aql: str = f'{aql_parts[0]} {Config.AQL_DEP} {aql_parts[1]}'
diff --git a/mc_backend/mcserver/app/services/corpusService.py b/mc_backend/mcserver/app/services/corpusService.py
index 3ff75d0..7942af5 100644
--- a/mc_backend/mcserver/app/services/corpusService.py
+++ b/mc_backend/mcserver/app/services/corpusService.py
@@ -14,8 +14,8 @@ from networkx import graph, MultiDiGraph
from networkx.readwrite import json_graph
from requests import HTTPError
from mcserver.app import db
-from mcserver.app.models import CitationLevel, GraphData, Solution, ExerciseType, Phenomenon, FrequencyAnalysis, \
- AnnisResponse, CorpusMC, make_solution_element_from_salt_id
+from mcserver.app.models import CitationLevel, GraphData, Solution, ExerciseType, Phenomenon, AnnisResponse, CorpusMC, \
+ make_solution_element_from_salt_id, FrequencyItem
from mcserver.app.services import AnnotationService, XMLservice, TextService, FileService, FrequencyService, \
CustomCorpusService
from mcserver.config import Config
@@ -107,22 +107,23 @@ class CorpusService:
return AnnisResponse.from_dict(json.loads(response.text))
@staticmethod
- def get_frequency_analysis(urn: str, is_csm: bool) -> FrequencyAnalysis:
+ def get_frequency_analysis(urn: str, is_csm: bool) -> List[FrequencyItem]:
""" Collects frequency statistics for various combinations of linguistic annotations in a corpus. """
if is_csm:
ar: AnnisResponse = CorpusService.get_corpus(urn, is_csm)
search_phenomena: List[List[Phenomenon]] = []
- for head_phenomenon in Phenomenon:
- for base_phenomenon in Phenomenon:
- search_phenomena.append([head_phenomenon, base_phenomenon])
+ for head_phenomenon in list(x for x in Phenomenon.__dict__.keys() if x.isupper()):
+ for base_phenomenon in list(x for x in Phenomenon.__dict__.keys() if x.isupper()):
+ search_phenomena.append([Phenomenon().__getattribute__(head_phenomenon),
+ Phenomenon().__getattribute__(base_phenomenon)])
disk_urn: str = AnnotationService.get_disk_urn(urn)
- fa: FrequencyAnalysis = FrequencyAnalysis()
+ fa: List[FrequencyItem] = []
for search_phenomenon in search_phenomena:
- if Phenomenon.dependency in search_phenomenon:
+ if Phenomenon.DEPENDENCY in search_phenomenon:
continue
- elif search_phenomenon[0] == Phenomenon.case:
+ elif search_phenomenon[0] == Phenomenon.FEATS:
fa += FrequencyService.add_case_frequencies(disk_urn, search_phenomenon)
- elif search_phenomenon[0] in [Phenomenon.lemma, Phenomenon.partOfSpeech]:
+ elif search_phenomenon[0] in [Phenomenon.LEMMA, Phenomenon.UPOSTAG]:
fa += FrequencyService.add_generic_frequencies(disk_urn, search_phenomenon)
FrequencyService.add_dependency_frequencies(ar.graph_data, fa)
return FrequencyService.extract_case_values(fa)
@@ -130,7 +131,7 @@ class CorpusService:
url: str = Config.INTERNET_PROTOCOL + f"{Config.HOST_IP_CSM}:{Config.CORPUS_STORAGE_MANAGER_PORT}" + \
Config.SERVER_URI_FREQUENCY
response: requests.Response = requests.get(url, params=dict(urn=urn))
- return FrequencyAnalysis(json_list=json.loads(response.text))
+ return [FrequencyItem.from_dict(x) for x in json.loads(response.text)]
@staticmethod
def get_graph(cts_urn: str) -> MultiDiGraph:
@@ -191,14 +192,14 @@ class CorpusService:
node_ids: List[str] = CorpusService.find_matches(urn, aql, is_csm=True)
if len(search_phenomena) == 1:
# it's cloze or markWords; the solutions only have a target, no explicit value
- if search_phenomena[0] == Phenomenon.dependency:
+ if search_phenomena[0] == Phenomenon.DEPENDENCY:
node_ids = [node_ids[i] for i in range(len(node_ids)) if i % 2 != 0]
matches += [Solution(target=make_solution_element_from_salt_id(x)) for x in node_ids]
else:
matches += [Solution(target=make_solution_element_from_salt_id(x)) for x in node_ids]
else:
# it's a matching exercise
- if search_phenomena[0] == Phenomenon.dependency:
+ if search_phenomena[0] == Phenomenon.DEPENDENCY:
for i in range(len(node_ids)):
if i % 3 == 0:
matches.append(Solution(
diff --git a/mc_backend/mcserver/app/services/fileService.py b/mc_backend/mcserver/app/services/fileService.py
index 9a70ac0..8ce81c5 100644
--- a/mc_backend/mcserver/app/services/fileService.py
+++ b/mc_backend/mcserver/app/services/fileService.py
@@ -26,7 +26,7 @@ class FileService:
def create_tmp_file(file_type: FileType, file_id: str) -> DownloadableFile:
""" Creates a new temporary file and adds it to the FileService watch list. """
# generate temp file
- (handle, path) = mkstemp(suffix=".{0}".format(file_type.value), dir=Config.TMP_DIRECTORY)
+ (handle, path) = mkstemp(suffix=".{0}".format(file_type), dir=Config.TMP_DIRECTORY)
# grant all permissions for the file to everybody, so Docker can handle the files during builds / updates
os.fchmod(handle, 0o777)
file_name: str = os.path.basename(path)
@@ -85,6 +85,7 @@ class FileService:
vocabulary_file_content: str = FileService.get_file_content(
os.path.join(Config.ASSETS_DIRECTORY, vocabulary_corpus.value))
vocabulary_list: List[str] = json.loads(vocabulary_file_content)
+ frequency_upper_bound = frequency_upper_bound if frequency_upper_bound else len(vocabulary_list)
return set(vocabulary_list[:frequency_upper_bound])
@staticmethod
@@ -120,16 +121,16 @@ class FileService:
if solution_indices is not None:
solutions = [solutions[x] for x in solution_indices]
# write the relevant content to the file
- if file_type == FileType.pdf:
+ if file_type == FileType.PDF:
html_string: str = FileService.get_pdf_html_string(exercise, conll, file_type, solutions)
with open(existing_file.file_path, "wb+") as f:
pdf = pisa.CreatePDF(StringIO(html_string), f)
- elif file_type == FileType.xml:
+ elif file_type == FileType.XML:
# export exercise data to XML
xml_string: str = XMLservice.create_xml_string(exercise, conll, file_type, solutions)
with open(existing_file.file_path, "w+") as f:
f.write(xml_string)
- elif file_type == FileType.docx:
+ elif file_type == FileType.DOCX:
FileService.make_docx_file(exercise, existing_file.file_path, conll, file_type, solutions)
return existing_file
@@ -138,10 +139,10 @@ class FileService:
"""Creates a temporary file for the HTML content, so the users can download it."""
existing_file: DownloadableFile = FileService.create_tmp_file(file_type, urn)
# write the relevant content to the file
- if file_type == FileType.pdf:
+ if file_type == FileType.PDF:
with open(existing_file.file_path, "wb+") as f:
pdf = pisa.CreatePDF(StringIO(html_content), f)
- elif file_type == FileType.docx:
+ elif file_type == FileType.DOCX:
soup: BeautifulSoup = BeautifulSoup(html_content, 'html.parser')
doc: Document = Document()
par1: Paragraph = doc.add_paragraph(soup.p.text)
diff --git a/mc_backend/mcserver/app/services/frequencyService.py b/mc_backend/mcserver/app/services/frequencyService.py
index 4ffbee6..768f033 100644
--- a/mc_backend/mcserver/app/services/frequencyService.py
+++ b/mc_backend/mcserver/app/services/frequencyService.py
@@ -5,39 +5,43 @@ from graphannis.cs import FrequencyTableEntry
from mcserver.app.services import AnnotationService
from mcserver.config import Config
-from mcserver.app.models import Phenomenon, FrequencyAnalysis, FrequencyItem, GraphData, Dependency, LinkMC, NodeMC
+from mcserver.app.models import Phenomenon, FrequencyItem, GraphData, Dependency, LinkMC, NodeMC, Feats
class FrequencyService:
""" Service for calculating word (or construction) frequencies in text corpora. """
@staticmethod
- def add_case_frequencies(urn: str, search_phenomenon: List[Phenomenon]) -> FrequencyAnalysis:
- """ Adds frequency information for case annotations in a corpus. """
- aql: str = f"{Phenomenon.case.value}={Config.AQL_CASE} {Config.AQL_DEP} "
- definition: str = f"1:{search_phenomenon[0].value},2:"
- fa: FrequencyAnalysis = FrequencyAnalysis()
- if search_phenomenon[1] == Phenomenon.case:
- aql += f"{Phenomenon.case.value}={Config.AQL_CASE}"
- definition += search_phenomenon[1].value
- result: List[FrequencyTableEntry] = Config.CORPUS_STORAGE_MANAGER.frequency(
- corpus_name=urn, query=aql, definition=definition)
- fa += [FrequencyItem([x.values[0], x.values[1]], search_phenomenon, x.count) for x in result]
+ def add_case_frequencies(urn: str, search_phenomena: List[Phenomenon]) -> List[FrequencyItem]:
+ """ Adds frequency information for all case annotations in a corpus. """
+ aql: str = f"{Phenomenon.FEATS}={Config.AQL_CASE} {Config.AQL_DEP} "
+ definition: str = f"1:{search_phenomena[0]},2:"
+ fa: List[FrequencyItem] = []
+ if search_phenomena[1] == Phenomenon.FEATS:
+ aql += f"{Phenomenon.FEATS}={Config.AQL_CASE}"
+ fa += FrequencyService.add_case_frequency_items(urn, aql, definition, search_phenomena)
else:
- aql += search_phenomenon[1].value
- definition += search_phenomenon[1].value
- result: List[FrequencyTableEntry] = Config.CORPUS_STORAGE_MANAGER.frequency(
- corpus_name=urn, query=aql, definition=definition)
- fa += [FrequencyItem([x.values[0], x.values[1]], search_phenomenon, x.count) for x in result]
+ aql += search_phenomena[1]
+ fa += FrequencyService.add_case_frequency_items(urn, aql, definition, search_phenomena)
return fa
@staticmethod
- def add_dependency_frequencies(graph_data: GraphData, fa: FrequencyAnalysis):
+ def add_case_frequency_items(urn: str, aql: str, definition: str, search_phenomena: List[Phenomenon]) -> \
+ List[FrequencyItem]:
+ """Adds frequency information for specific case annotations in a corpus."""
+ definition += search_phenomena[1]
+ result: List[FrequencyTableEntry] = Config.CORPUS_STORAGE_MANAGER.frequency(
+ corpus_name=urn, query=aql, definition=definition)
+ return [FrequencyItem(x.count, search_phenomena, [x.values[0], x.values[1]]) for x in
+ result]
+
+ @staticmethod
+ def add_dependency_frequencies(graph_data: GraphData, fa: List[FrequencyItem]):
""" Performs a frequency analysis for dependency annotations in a corpus. """
id_to_node_dict: Dict[str, int] = {graph_data.nodes[i].id: i for i in range(len(graph_data.nodes))}
dep_to_enum_dict: Dict[str, Dependency] = {}
- for key in AnnotationService.phenomenon_map[Phenomenon.dependency]:
- for value in AnnotationService.phenomenon_map[Phenomenon.dependency][key]:
+ for key in AnnotationService.phenomenon_map[Phenomenon.DEPENDENCY]:
+ for value in AnnotationService.phenomenon_map[Phenomenon.DEPENDENCY][key]:
dep_to_enum_dict[value] = Dependency[key]
dep_links: List[LinkMC] = [link for link in graph_data.links if
link.annis_component_name == Config.GRAPHANNIS_DEPENDENCY_LINK]
@@ -49,17 +53,19 @@ class FrequencyService:
id_to_target_link_dict[link.target] = id_to_target_link_dict.get(link.target, []) + [link]
values_to_fi_dict: Dict[str, FrequencyItem] = {}
for link in dep_links:
- base_node: NodeMC = graph_data.nodes[id_to_node_dict[link.source]]
- if not link.udep_deprel:
+ if not link.udep_deprel or not dep_to_enum_dict.get(link.udep_deprel):
continue
dep: Dependency = dep_to_enum_dict[link.udep_deprel]
+ base_node: NodeMC = graph_data.nodes[id_to_node_dict[link.source]]
FrequencyService.add_frequency_item(base_node, dep, values_to_fi_dict, 1)
for other_link in id_to_source_link_dict.get(link.target, []):
+ if not dep_to_enum_dict.get(other_link.udep_deprel):
+ continue
base_node = graph_data.nodes[id_to_node_dict[other_link.target]]
FrequencyService.add_frequency_item(base_node, dep, values_to_fi_dict, 0)
FrequencyService.increase_frequency_count(
[dep.name, dep_to_enum_dict[other_link.udep_deprel].name],
- [Phenomenon.dependency, Phenomenon.dependency], values_to_fi_dict)
+ [Phenomenon.DEPENDENCY, Phenomenon.DEPENDENCY], values_to_fi_dict)
for fi in values_to_fi_dict.values():
fa.append(fi)
@@ -68,50 +74,51 @@ class FrequencyService:
target_index: int):
""" Builds a collection of frequency items for given dependency links. """
values_list: List[List[str]] = [[base_node.udep_feats], [base_node.udep_lemma], [base_node.udep_upostag]]
- phenomena_list: List[List[Phenomenon]] = [[Phenomenon.case], [Phenomenon.lemma], [Phenomenon.partOfSpeech]]
+ phenomena_list: List[List[Phenomenon]] = [[Phenomenon.FEATS], [Phenomenon.LEMMA], [Phenomenon.UPOSTAG]]
for vl in values_list:
vl.insert(target_index, dep.name)
for pl in phenomena_list:
- pl.insert(target_index, Phenomenon.dependency)
- if not base_node.udep_feats or Phenomenon.case.name not in base_node.udep_feats.lower():
+ pl.insert(target_index, Phenomenon.DEPENDENCY)
+ if not base_node.udep_feats or Phenomenon.FEATS not in base_node.udep_feats.lower():
values_list.pop(0)
phenomena_list.pop(0)
for i in range(len(values_list)):
FrequencyService.increase_frequency_count(values_list[i], phenomena_list[i], values_to_fi_dict)
@staticmethod
- def add_generic_frequencies(urn: str, search_phenomenon: List[Phenomenon]) -> FrequencyAnalysis:
+ def add_generic_frequencies(urn: str, search_phenomena: List[Phenomenon]) -> List[FrequencyItem]:
""" Adds frequency information for case and lemma annotations in a corpus. """
- aql: str = f"{search_phenomenon[0].value} {Config.AQL_DEP} "
- definition: str = f"1:{search_phenomenon[0].value},2:"
- fa: FrequencyAnalysis = FrequencyAnalysis()
- if search_phenomenon[1] == Phenomenon.case:
- aql += f"{Phenomenon.case.value}={Config.AQL_CASE}"
- definition += Phenomenon.case.value
+ aql: str = f"{search_phenomena[0]} {Config.AQL_DEP} "
+ definition: str = f"1:{search_phenomena[0]},2:"
+ fa: List[FrequencyItem] = []
+ if search_phenomena[1] == Phenomenon.FEATS:
+ aql += f"{Phenomenon.FEATS}={Config.AQL_CASE}"
+ definition += Phenomenon.FEATS
result: List[FrequencyTableEntry] = Config.CORPUS_STORAGE_MANAGER.frequency(
corpus_name=urn, query=aql, definition=definition)
- fa += [FrequencyItem(x.values, search_phenomenon, x.count) for x in result]
+ fa += [FrequencyItem(x.count, search_phenomena, x.values) for x in result]
else:
- aql += search_phenomenon[1].value
- definition += search_phenomenon[1].value
+ aql += search_phenomena[1]
+ definition += search_phenomena[1]
result: List[FrequencyTableEntry] = Config.CORPUS_STORAGE_MANAGER.frequency(
corpus_name=urn, query=aql, definition=definition)
- fa += [FrequencyItem(x.values, search_phenomenon, x.count) for x in result]
+ fa += [FrequencyItem(x.count, search_phenomena, x.values) for x in result]
return fa
@staticmethod
- def extract_case_values(fa: FrequencyAnalysis) -> FrequencyAnalysis:
+ def extract_case_values(fa: List[FrequencyItem]) -> List[FrequencyItem]:
""" Checks if features were involved in the search and, if yes, extracts the case values from them. """
values_to_fi_dict: Dict[str, List[FrequencyItem]] = {}
for fi in fa:
- target_indices: List[int] = [i for i in range(len(fi.phenomena)) if fi.phenomena[i] == Phenomenon.case]
+ target_indices: List[int] = [i for i in range(len(fi.phenomena)) if
+ fi.phenomena[i] == Phenomenon.FEATS]
for ti in target_indices:
value_parts: List[str] = fi.values[ti].split("|")
- case_string: str = next(x for x in value_parts if Phenomenon.case.name in x.lower())
+ case_string: str = next(x for x in value_parts if Feats.Case.value in x.lower())
fi.values[ti] = case_string.split("=")[1]
values_combined: str = "".join(fi.values)
values_to_fi_dict[values_combined] = values_to_fi_dict.get(values_combined, []) + [fi]
- ret_val: FrequencyAnalysis = FrequencyAnalysis()
+ ret_val: List[FrequencyItem] = []
# remove duplicates that have the same values
for key in values_to_fi_dict:
new_fi: FrequencyItem = values_to_fi_dict[key][0]
diff --git a/mc_backend/mcserver/app/services/xmlService.py b/mc_backend/mcserver/app/services/xmlService.py
index 8a2fba2..e364160 100644
--- a/mc_backend/mcserver/app/services/xmlService.py
+++ b/mc_backend/mcserver/app/services/xmlService.py
@@ -97,7 +97,7 @@ class XMLservice:
for solution in solutions:
gap_counter += 1
target_token: OrderedDict = TextService.get_token_by_salt_id(solution.target.salt_id, conll)
- target_token["form"] = "[[{0}]]".format(gap_counter) if file_type == FileType.xml else "_" * max_gap_length
+ target_token["form"] = "[[{0}]]".format(gap_counter) if file_type == FileType.XML else "_" * max_gap_length
return TextService.strip_whitespace(" ".join([y["form"] for x in conll for y in x]))
@staticmethod
diff --git a/mc_backend/mcserver/config.py b/mc_backend/mcserver/config.py
index 0020bb2..65c3cc7 100644
--- a/mc_backend/mcserver/config.py
+++ b/mc_backend/mcserver/config.py
@@ -1,5 +1,6 @@
"""Application configuration classes for different environments / use cases"""
import os
+from pathlib import Path
from dotenv import load_dotenv
from graphannis.cs import CorpusStorageManager
@@ -19,6 +20,7 @@ class Config(object):
CURRENT_WORKING_DIRECTORY_PARENT = os.path.dirname(CURRENT_WORKING_DIRECTORY)
CURRENT_WORKING_DIRECTORY_PARTS = os.path.split(CURRENT_WORKING_DIRECTORY) # [::-1]
GRAPH_DATABASE_DIR = os.path.join(os.sep, "tmp", "graphannis-data")
+ CSM_DIRECTORY = os.path.join(CURRENT_WORKING_DIRECTORY, "csm")
MC_SERVER_DIRECTORY = CURRENT_WORKING_DIRECTORY if \
os.path.split(CURRENT_WORKING_DIRECTORY)[-1] == "mcserver" else os.path.join(CURRENT_WORKING_DIRECTORY,
"mcserver")
@@ -33,9 +35,9 @@ class Config(object):
TREEBANKS_PATH = os.path.join(ASSETS_DIRECTORY, "treebanks")
TREEBANKS_PROIEL_PATH = os.path.join(TREEBANKS_PATH, "proiel")
- API_SPEC_JSON_FILE_NAME = "openapi.json"
- API_SPEC_JSON_FILE_PATH = os.path.join(MC_SERVER_DIRECTORY, API_SPEC_JSON_FILE_NAME)
- API_SPEC_YAML_FILE_PATH = os.path.join(MC_SERVER_DIRECTORY, "mcserver_api.yaml")
+ API_SPEC_CSM_FILE_PATH = os.path.join(CSM_DIRECTORY, "csm_api.yaml")
+ API_SPEC_MCSERVER_FILE_PATH = os.path.join(MC_SERVER_DIRECTORY, "mcserver_api.yaml")
+ API_SPEC_MODELS_YAML_FILE_PATH = os.path.join(Path(MC_SERVER_DIRECTORY).parent, "openapi_models.yaml")
AQL_CASE = "/.*Case=.*/"
AQL_DEP = "->dep"
AQL_DEPREL = "deprel"
@@ -47,7 +49,6 @@ class Config(object):
CORPUS_STORAGE_MANAGER_PORT = 6555
COVERAGE_CONFIGURATION_FILE_NAME = ".coveragerc"
COVERAGE_ENVIRONMENT_VARIABLE = "COVERAGE_PROCESS_START"
- CSM_DIRECTORY = os.path.join(CURRENT_WORKING_DIRECTORY, "csm")
CSRF_ENABLED = True
CTS_API_BASE_URL = "https://cts.perseids.org/api/cts/"
CUSTOM_CORPUS_CAES_GAL_FILE_PATH = os.path.join(TREEBANKS_PROIEL_PATH, "caes-gal.conllu")
@@ -129,7 +130,7 @@ class Config(object):
SERVER_URI_H5P = SERVER_URI_BASE + "h5p"
SERVER_URI_KWIC = SERVER_URI_BASE + "kwic"
SERVER_URI_RAW_TEXT = SERVER_URI_BASE + "rawtext"
- SERVER_URI_STATIC_EXERCISES = SERVER_URI_BASE + "exercises"
+ SERVER_URI_STATIC_EXERCISES = SERVER_URI_BASE + "staticExercises"
SERVER_URI_TEXT_COMPLEXITY = SERVER_URI_BASE + "textcomplexity"
SERVER_URI_VALID_REFF = SERVER_URI_BASE + "validReff"
SERVER_URI_VECTOR_NETWORK = SERVER_URI_BASE + "vectorNetwork"
diff --git a/mc_backend/mcserver/mcserver_api.yaml b/mc_backend/mcserver/mcserver_api.yaml
index e9ab2c4..fbf51da 100644
--- a/mc_backend/mcserver/mcserver_api.yaml
+++ b/mc_backend/mcserver/mcserver_api.yaml
@@ -17,7 +17,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/Corpus'
+ $ref: '../openapi_models.yaml#/components/schemas/Corpus'
parameters:
- name: last_update_time
in: query
@@ -55,7 +55,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/Corpus'
+ $ref: '../openapi_models.yaml#/components/schemas/Corpus'
patch:
summary: Updates a single corpus by ID.
operationId: mcserver.app.api.corpusAPI.patch
@@ -65,7 +65,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/Corpus'
+ $ref: '../openapi_models.yaml#/components/schemas/Corpus'
parameters:
- name: author
in: query
@@ -98,7 +98,7 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/AnnisResponse'
+ $ref: '../openapi_models.yaml#/components/schemas/AnnisResponse'
parameters:
- name: eid
in: query
@@ -116,595 +116,158 @@ paths:
content:
application/json:
schema:
- $ref: '#/components/schemas/AnnisResponse'
+ $ref: '../openapi_models.yaml#/components/schemas/AnnisResponse'
+ requestBody:
+ required: true
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '../openapi_models.yaml#/components/schemas/ExerciseForm'
+ /exerciseList:
+ get:
+ summary: Provides metadata for all available exercises.
+ operationId: mcserver.app.api.exerciseListAPI.get
+ responses:
+ 200:
+ description: Data for interactive exercises, excluding the linguistic details.
+ content:
+ application/json:
+ schema:
+ $ref: '../openapi_models.yaml#/components/schemas/Exercise'
+ parameters:
+ - name: lang
+ in: query
+ description: ISO 639-1 Language Code for the localization of exercise content.
+ required: true
+ schema:
+ type: string
+ example: en
+ - name: frequency_upper_bound
+ in: query
+ description: Upper bound for reference vocabulary frequency.
+ required: false
+ schema:
+ type: integer
+ example: 500
+ default: 0
+ - name: last_update_time
+ in: query
+ description: Time (in milliseconds) of the last update.
+ required: false
+ schema:
+ type: integer
+ example: 123456789
+ default: 0
+ - name: vocabulary
+ in: query
+ description: Identifier for a reference vocabulary.
+ required: false
+ schema:
+ type: string
+ enum: [agldt, bws, proiel, viva]
+ example: agldt
+ /file:
+ get:
+ summary: Provides the URL to download a specific file.
+ operationId: mcserver.app.api.fileAPI.get
+ responses:
+ 200:
+          description: URL for downloading the requested file.
+ content:
+ application/json:
+ schema:
+ $ref: '../openapi_models.yaml#/components/schemas/Exercise'
+ parameters:
+ - name: id
+ in: query
+ description: Unique identifier (UUID) for an exercise.
+ required: true
+ schema:
+ type: string
+ example: 12345678-1234-5678-1234-567812345678
+ - name: type
+ in: query
+ description: File format for the requested download.
+ required: true
+ schema:
+ $ref: '../openapi_models.yaml#/components/schemas/FileType'
+ - name: solution_indices
+ in: query
+ description: Indices for the solutions that should be included in the download.
+ required: false
+ schema:
+ type: array
+ items:
+ type: integer
+ example: 0
+ default: []
+ post:
+ summary: Serializes and persists learning results or HTML content for later access.
+ operationId: mcserver.app.api.fileAPI.post
+ responses:
+ 200:
+ description: Indication of success, possibly a reference to the resulting file.
+ content:
+ application/json:
+ schema:
+ type: string
+ description: Indication of success, or name of the file that was generated.
+ example: 12345678-1234-5678-1234-567812345678.pdf
requestBody:
required: true
content:
application/x-www-form-urlencoded:
schema:
- x-body-name: exercise_data
type: object
- $ref: '#/components/schemas/ExerciseForm'
- /exercises:
+ description: Data that should be serialized and persisted.
+ x-body-name: file_data
+ properties:
+ file_type:
+ $ref: '../openapi_models.yaml#/components/schemas/FileType'
+ html_content:
+ type: string
+ description: HTML content to be serialized.
+ example:
+ learning_result:
+ type: string
+ description: Serialized XAPI results for an interactive exercise.
+ example: "{'0': {}}"
+ urn:
+ type: string
+ description: CTS URN for the text passage from which the HTML content was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ /frequency:
+ get:
+ summary: Returns results for a frequency query from ANNIS for a given CTS URN.
+ operationId: mcserver.app.api.frequencyAPI.get
+ responses:
+ 200:
+ description: Frequency analysis, i.e. a list of frequency items.
+ content:
+ application/json:
+ schema:
+ type: array
+ description: List of items with frequency data for linguistic phenomena.
+ items:
+ $ref: "../openapi_models.yaml#/components/schemas/FrequencyItem"
+ parameters:
+ - name: urn
+ in: query
+ description: CTS URN for referencing the corpus.
+ required: true
+ schema:
+ type: string
+ example: urn:cts:latinLit:phi1254.phi001.perseus-lat2:5.6.21-5.6.21
+ /staticExercises:
get:
summary: Returns metadata for static exercises.
operationId: mcserver.app.api.staticExercisesAPI.get
responses:
200:
- description: Metadata for static exercises, including their respective URIs in the frontend.
+ description: Metadata for static exercises, mapped to their respective URIs in the frontend.
content:
application/json:
schema:
-# TODO: specify object properties
type: object
-components:
- schemas:
- AnnisResponse:
- description: A response with graph data from ANNIS, possibly with additional data for exercises.
- type: object
- properties:
- exercise_id:
- type: string
- description: Unique identifier (UUID) for the exercise.
- example: 12345678-1234-5678-1234-567812345678
- exercise_type:
- type: string
- description: Type of exercise, concerning interaction and layout.
- example: ddwtos
- frequency_analysis:
- type: array
- description: List of items with frequency data for linguistic phenomena.
- items:
- $ref: "#/components/schemas/FrequencyItem"
- graph_data:
- $ref: "#/components/schemas/GraphData"
- solutions:
- type: array
- description: Correct solutions for this exercise.
- items:
- $ref: '#/components/schemas/Solution'
- text_complexity:
- $ref: '#/components/schemas/TextComplexity'
- uri:
- type: string
- description: URI for accessing the exercise in this API.
- example: /mc/api/v1.0/file/fd97630c-1f5a-4102-af56-20eb0babdfee
- Corpus: # Object definition
- description: Collection of texts.
- type: object # Data type
- x-tablename: Corpus
- properties:
- author:
- type: string
- description: Author of the texts in the corpus.
- example: Aulus Gellius
- default: "Anonymus"
- nullable: false
- cid:
- type: integer
- description: Unique identifier for the corpus.
- example: 1
- x-primary-key: true
- x-autoincrement: true
- citation_level_1:
- type: string
- description: First level for citing the corpus.
- example: Book
- default: default
- citation_level_2:
- type: string
- description: Second level for citing the corpus.
- example: Chapter
- default: default
- citation_level_3:
- type: string
- description: Third level for citing the corpus.
- example: Section
- default: default
- source_urn:
- type: string
- description: CTS base URN for referencing the corpus.
- example: urn:cts:latinLit:phi1254.phi001.perseus-lat2
- x-unique: true
- title:
- type: string
- description: Corpus title.
- example: Noctes Atticae
- nullable: false
- default: Anonymus
- required:
- - source_urn
- Exercise:
- allOf:
- - $ref: "#/components/schemas/ExerciseBase"
- - description: Data for creating and evaluating interactive exercises.
- type: object # Data type
- x-tablename: Exercise
- properties:
- conll:
- type: string
- description: CONLL-formatted linguistic annotations represented as a single string.
- example: \# newdoc id = ...\n# sent_id = 1\n# text = Caesar fortis est.\n1\tCaesar\tCaeso\tVERB ...
- default: ""
- nullable: false
- eid:
- type: string
- description: Unique identifier (UUID) for the exercise.
- example: 12345678-1234-5678-1234-567812345678
- x-primary-key: true
- exercise_type:
- type: string
- description: Type of exercise, concerning interaction and layout.
- example: markWords
- default: ""
- nullable: false
- exercise_type_translation:
- type: string
- description: Localized expression of the exercise type.
- example: Cloze
- default: ""
- language:
- type: string
- description: ISO 639-1 Language Code for the localization of exercise content.
- example: en
- default: de
- last_access_time:
- type: number
- format: float
- description: When the exercise was last accessed (as POSIX timestamp).
- example: 1234567.789
- x-index: true
- solutions:
- type: string
- description: Correct solutions for the exercise.
- example: "[{'target': {'sentence_id': 1, 'token_id': 7, 'salt_id': 'salt:/urn:...', 'content': 'eo'}, 'value': {'sentence_id': 0, 'token_id': 0, 'content': None, 'salt_id': 'salt:/urn:...'}}]"
- default: "[]"
- nullable: false
- text_complexity:
- type: number
- format: float
- description: Overall text complexity as measured by the software's internal language analysis.
- example: 54.53
- default: 0
- urn:
- type: string
- description: CTS URN for the text passage from which the exercise was created.
- example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
- default: ""
- nullable: false
- required:
- - eid
- - last_access_time
- ExerciseBase:
- description: Base data for creating and evaluating interactive exercises.
- type: object
- properties:
- correct_feedback:
- type: string
- description: Feedback for successful completion of the exercise.
- example: Well done!
- default: ""
- general_feedback:
- type: string
- description: Feedback for finishing the exercise.
- example: You have finished the exercise.
- default: ""
- incorrect_feedback:
- type: string
- description: Feedback for failing to complete the exercise successfully.
- example: Unfortunately, that answer is wrong.
- default: ""
- instructions:
- type: string
- description: Hints for how to complete the exercise.
- example: Fill in the gaps!
- default: ""
- partially_correct_feedback:
- type: string
- description: Feedback for successfully completing certain parts of the exercise.
- example: Some parts of this answer are correct.
- default: ""
- search_values:
- type: string
- description: Search queries that were used to build the exercise.
- example: "['upostag=noun', 'dependency=object']"
- default: "[]"
- work_author:
- type: string
- description: Name of the person who wrote the base text for the exercise.
- example: C. Iulius Caesar
- default: ""
- work_title:
- type: string
- description: Title of the base text for the exercise.
- example: Noctes Atticae
- default: ""
- required:
- - instructions
- - search_values
- ExerciseForm:
- allOf:
- - $ref: '#/components/schemas/ExerciseBase'
- - description: Additional exercise data.
- type: object
- properties:
- type:
- type: string
- description: Type of exercise, concerning interaction and layout.
- example: markWords
- type_translation:
- type: string
- description: Localized expression of the exercise type.
- example: Cloze
- urn:
- type: string
- description: CTS URN for the text passage from which the exercise was created.
- example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
- required:
- - type
- FrequencyItem:
- type: object
- properties:
- count:
- type: integer
- description: How often the given combination of values occurred.
- example: 1
- phenomena:
- type: array
- description: Labels for the phenomena described in this frequency entry.
- example: []
- items:
- type: string
- values:
- type: array
- description: Values for the phenomena described in this frequency entry.
- example: []
- items:
- type: string
- GraphData:
- type: object
- description: Nodes, edges and metadata for a graph.
- properties:
- directed:
- type: boolean
- description: Whether edges in the returned graph are directed.
- example: true
- graph:
- type: object
- description: Additional graph data.
- example: {}
- links:
- type: array
- description: List of edges for the graph.
- items:
- $ref: '#/components/schemas/Link'
- multigraph:
- type: boolean
- description: Whether the graph consists of multiple subgraphs.
- example: true
- nodes:
- type: array
- description: List of nodes for the graph.
- items:
- $ref: '#/components/schemas/NodeMC'
- required:
- - links
- - nodes
- LearningResult:
- description: Learner data for completed exercises.
- type: object
- x-tablename: LearningResult
- properties:
- actor_account_name:
- type: string
- description: H5P user ID, usually unique per device.
- example: ebea3f3e-7410-4215-b34d-c1417f7c7c18
- default: ""
- actor_object_type:
- type: string
- description: Describes the kind of object that was recognized as actor.
- example: Agent
- default: ""
- category_id:
- type: string
- description: Link to the exercise type specification.
- example: http://h5p.org/libraries/H5P.MarkTheWords-1.9
- default: ""
- category_object_type:
- type: string
- description: Describes the kind of object that was recognized as exercise.
- example: Activity
- default: ""
- choices:
- type: string
- description: JSON string containing a list of possible choices, each with ID and description.
- example: "[{'id':'2','description':{'en-US':'Quintus ist bei allen in der Provinz beliebt.\n'}},{'id':'3','description':{'en-US':'Asia ist eine unbekannte Provinz.\n'}}]"
- default: "[]"
- completion:
- type: boolean
- description: Whether the exercise was fully processed or not.
- example: true
- correct_responses_pattern:
- type: string
- description: JSON string containing a list of possible solutions to the exercise, given as patterns of answers.
- example: "['0[,]1[,]2']"
- created_time:
- type: number
- format: float
- description: When the learner data was received (POSIX timestamp).
- example: 1234567.789
- x-index: true
- x-primary-key: true
- duration:
- type: string
- description: How many seconds it took a learner to complete the exercise.
- example: PT9.19S
- default: "PT0S"
- extensions:
- type: string
- description: JSON string containing a mapping of keys and values (usually the local content ID, i.e. a versioning mechanism).
- example: "{'http://h5p.org/x-api/h5p-local-content-id':1}"
- default: "{}"
- interaction_type:
- type: string
- description: Exercise type.
- example: choice
- default: ""
- object_definition_description:
- type: string
- description: Exercise content, possibly including instructions.
- example: "Bestimme die Form von custodem im Satz: Urbs custodem non tyrannum, domus hospitem non expilatorem recepit.\n"
- object_definition_type:
- type: string
- description: Type of object definition that is presented to the user.
- example: http://adlnet.gov/expapi/activities/cmi.interaction
- default: ""
- object_object_type:
- type: string
- description: Type of object that is presented to the user.
- example: Activity
- default: ""
- response:
- type: string
- description: Answer provided by the user, possibly as a pattern.
- example: His in rebus[,]sociis[,]civibus[,]rei publicae
- score_max:
- type: integer
- description: Maximum possible score to be achieved in this exercise.
- example: 1
- score_min:
- type: integer
- description: Minimum score to be achieved in this exercise.
- example: 0
- score_raw:
- type: integer
- description: Score that was actually achieved by the user in this exercise.
- example: 1
- score_scaled:
- type: number
- format: float
- description: Relative score (between 0 and 1) that was actually achieved by the user in this exercise.
- example: 0.8889
- default: 0
- success:
- type: boolean
- description: Whether the exercise was successfully completed or not.
- example: true
- verb_display:
- type: string
- description: Type of action that was performed by the user.
- example: answered
- default: ""
- verb_id:
- type: string
- description: Link to the type of action that was performed by the user.
- example: http://adlnet.gov/expapi/verbs/answered
- default: ""
- required:
- - completion
- - correct_responses_pattern
- - created_time
- - object_definition_description
- - response
- - score_max
- - score_min
- - score_raw
- - success
- Link:
- type: object
- properties:
- annis_component_name:
- type: string
- description: Component name as given by ANNIS.
- example: dep
- annis_component_type:
- type: string
- description: Component type as given by ANNIS.
- example: Pointing
- source:
- type: string
- description: ID of the source node for the edge.
- example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1
- target:
- type: string
- description: ID of the target node for the edge.
- example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok3
- udep_deprel:
- type: string
- description: Dependency relation described by the edge.
- example: "det"
- NodeMC:
- type: object
- properties:
- annis_node_name:
- type: string
- description: Node name as given by ANNIS.
- example: "urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1"
- annis_node_type:
- type: string
- description: Node type as given by ANNIS.
- example: "node"
- annis_tok:
- type: string
- description: Raw word form as given by ANNIS.
- example: "Galliae"
- annis_type:
- type: string
- description: Node type as given by ANNIS (?).
- example: "node"
- id:
- type: string
- description: Unique identifier for the node in the SALT model.
- example: "salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1"
- is_oov:
- type: boolean
- description: Whether the raw word form is missing in a given vocabulary.
- example: true
- udep_lemma:
- type: string
- description: Lemmatized word form.
- example: "Gallia"
- udep_upostag:
- type: string
- description: Universal part of speech tag for the word form.
- example: "PROPN"
- udep_xpostag:
- type: string
- description: Language-specific part of speech tag for the word form.
- example: "Ne"
- udep_feats:
- type: string
- description: Additional morphological information.
- example: "Case=Nom|Gender=Fem|Number=Sing"
- solution:
- type: string
- description: Solution value for this node in an exercise.
- example: ""
- Solution:
- type: object
- description: Correct solution for an exercise.
- properties:
- target:
- $ref: '#/components/schemas/SolutionElement'
- value:
- $ref: '#/components/schemas/SolutionElement'
- SolutionElement:
- type: object
- description: Target or value of a correct solution for an exercise.
- properties:
- content:
- type: string
- description: Content of the solution element.
- example: unam
- salt_id:
- type: string
- description: Unique identifier for the node in the SALT model.
- example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok9
- sentence_id:
- type: integer
- description: Unique identifier for the sentence in a corpus.
- example: 52548
- token_id:
- type: integer
- description: Unique identifier for the token in a sentence.
- example: 9
- required:
- - sentence_id
- - token_id
- TextComplexity:
- type: object
- description: Mapping of various elements of text complexity to their corresponding values.
- properties:
- all:
- type: number
- format: float
- description: Overall text complexity of the given corpus.
- example: 42.31
- avg_w_len:
- type: number
- format: float
- description: Average length of a word in the given corpus.
- example: 5.4
- avg_w_per_sent:
- type: number
- format: float
- description: Average number of words per sentence.
- example: 5.4
- lex_den:
- type: number
- format: float
- minimum: 0
- maximum: 1
- description: Lexical density of the given corpus.
- example: 0.43
- n_abl_abs:
- type: integer
- description: Number of ablativi absoluti in the given corpus.
- example: 1
- n_clause:
- type: integer
- description: Number of clauses in the given corpus.
- example: 1
- n_gerund:
- type: integer
- description: Number of gerunds in the given corpus.
- example: 1
- n_inf:
- type: integer
- description: Number of infinitives in the given corpus.
- example: 1
- n_part:
- type: integer
- description: Number of participles in the given corpus.
- example: 1
- n_punct:
- type: integer
- description: Number of punctuation signs in the given corpus.
- example: 1
- n_sent:
- type: integer
- description: Number of sentences in the given corpus.
- example: 1
- n_subclause:
- type: integer
- description: Number of subclauses in the given corpus.
- example: 1
- n_types:
- type: integer
- description: Number of distinct word forms in the given corpus.
- example: 1
- n_w:
- type: integer
- description: Number of words in the given corpus.
- example: 1
- pos:
- type: integer
- description: Number of distinct part of speech tags in the given corpus.
- example: 1
- UpdateInfo:
- description: Timestamps for updates of various resources.
- type: object
- x-tablename: UpdateInfo
- properties:
- created_time:
- type: number
- format: float
- description: When the resource was created (as POSIX timestamp).
- example: 1234567.789
- x-index: true
- last_modified_time:
- type: number
- format: float
- description: When the resource was last modified (as POSIX timestamp).
- example: 1234567.789
- x-index: true
- resource_type:
- type: string
- enum: [cts_data, exercise_list, file_api_clean]
- description: Name of the resource for which update timestamps are indexed.
- example: cts_data
- x-primary-key: true
- required:
- - created_time
- - last_modified_time
- - resource_type
+ additionalProperties:
+ $ref: '../openapi_models.yaml#/components/schemas/StaticExercise'
diff --git a/mc_backend/mcserver/migrations/env.py b/mc_backend/mcserver/migrations/env.py
index 24346e3..cc91356 100644
--- a/mc_backend/mcserver/migrations/env.py
+++ b/mc_backend/mcserver/migrations/env.py
@@ -5,7 +5,7 @@ from logging.config import fileConfig
import logging
import open_alchemy
-open_alchemy.init_yaml(spec_filename="mcserver/mcserver_api.yaml")
+open_alchemy.init_yaml(spec_filename="openapi_models.yaml")
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
diff --git a/mc_backend/mocks.py b/mc_backend/mocks.py
index 7b61cb0..a3a9540 100644
--- a/mc_backend/mocks.py
+++ b/mc_backend/mocks.py
@@ -670,8 +670,8 @@ class Mocks:
"salt_id": "salt:/urn:custom:latinLit:proiel.pal-agr.lat:1.1.1/doc1#sent159692tok1"}}],
"conll": "# newdoc id = /var/folders/30/yqnv6lz56r14dqhpw18knn2r0000gp/T/tmp7qn86au9\n# sent_id = 1\n# text = Caesar fortis est.\n1\tCaesar\tCaeso\tVERB\tC1|grn1|casA|gen1|stAN\tCase=Nom|Degree=Pos|Gender=Masc|Number=Sing\t2\tcsubj\t_\t_\n2\tfortis\tfortis\tADJ\tC1|grn1|casA|gen1|stAN\tCase=Nom|Degree=Pos|Gender=Masc|Number=Sing\t0\troot\troot\t_\n3\test\tsum\tAUX\tN3|modA|tem1|gen6|stAV\tMood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act\t2\tcop\t_\tSpaceAfter=No\n4\t.\t.\tPUNCT\tPunc\t_\t2\tpunct\t_\t_\n\n# sent_id = 2\n# text = Galli moriuntur.\n1\tGalli\tGallus\tPRON\tF1|grn1|casJ|gen1|stPD\tCase=Nom|Degree=Pos|Gender=Masc|Number=Plur|PronType=Dem\t2\tnsubj:pass\t_\t_\n2\tmoriuntur\tmorior\tVERB\tL3|modJ|tem1|gen9|stAV\tMood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin|Voice=Pass\t0\troot\troot\tSpaceAfter=No\n3\t.\t.\tPUNCT\tPunc\t_\t2\tpunct\t_\tSpacesAfter=\\n\n\n"}
app_dict: Dict[str, TestHelper] = {}
- aqls: List[str] = ["=".join([Phenomenon.partOfSpeech.value, '"{0}"'.format(
- AnnotationService.phenomenon_map[Phenomenon.partOfSpeech][PartOfSpeech.verb.name][0])])]
+ aqls: List[str] = ["=".join([Phenomenon.UPOSTAG, '"{0}"'.format(
+ AnnotationService.phenomenon_map[Phenomenon.UPOSTAG][PartOfSpeech.verb.name][0])])]
graph_data: GraphData = AnnotationService.map_graph_data(annis_response_dict["graph_data_raw"])
annis_response: AnnisResponse = AnnisResponse(graph_data=graph_data)
corpora: List[Corpus] = [
@@ -686,7 +686,7 @@ class Mocks:
cts_reff_xml: str = 'GetValidReffurn:cts:latinLit:phi0448.phi001.perseus-lat2:1.13urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.2urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.3urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.4urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.5urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.6urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.7'
exercise: Exercise = ExerciseMC.from_dict(
eid="test", last_access_time=datetime.utcnow().timestamp(), exercise_type='ddwtos',
- search_values=f'["{Phenomenon.case.name}={Case.accusative.name}", "{Phenomenon.dependency.name}={Dependency.object.name}", "{Phenomenon.lemma.name}=bellum", "{Phenomenon.dependency.name}={Dependency.root.name}"]',
+ search_values=f'["{Phenomenon.FEATS}={Case.accusative.name}", "{Phenomenon.DEPENDENCY}={Dependency.object.name}", "{Phenomenon.LEMMA}=bellum", "{Phenomenon.DEPENDENCY}={Dependency.root.name}"]',
language=Language.English.value,
conll="# newdoc id = /var/folders/30/yqnv6lz56r14dqhpw18knn2r0000gp/T/tmp7qn86au9\n# newpar\n# sent_id = 1\n# text = Caesar fortis est.\n1\tCaesar\tCaeso\tVERB\tC1|grn1|casA|gen1|stAN\tCase=Nom|Degree=Pos|Gender=Masc|Number=Sing\t2\tcsubj\t_\t_\n2\tfortis\tfortis\tADJ\tC1|grn1|casA|gen1|stAN\tCase=Nom|Degree=Pos|Gender=Masc|Number=Sing\t0\troot\t_\t_\n3\test\tsum\tAUX\tN3|modA|tem1|gen6|stAV\tMood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act\t2\tcop\t_\tSpaceAfter=No\n4\t.\t.\tPUNCT\tPunc\t_\t2\tpunct\t_\t_\n\n# sent_id = 2\n# text = Galli moriuntur.\n1\tGalli\tGallus\tPRON\tF1|grn1|casJ|gen1|stPD\tCase=Nom|Degree=Pos|Gender=Masc|Number=Plur|PronType=Dem\t2\tnsubj:pass\t_\t_\n2\tmoriuntur\tmorior\tVERB\tL3|modJ|tem1|gen9|stAV\tMood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin|Voice=Pass\t0\troot\t_\tSpaceAfter=No\n3\t.\t.\tPUNCT\tPunc\t_\t2\tpunct\t_\tSpacesAfter=\\n\n\n",
solutions=json.dumps([
diff --git a/mc_backend/openapi/openapi_server/controllers/default_controller.py b/mc_backend/openapi/openapi_server/controllers/default_controller.py
index 31bdf0f..5557a39 100644
--- a/mc_backend/openapi/openapi_server/controllers/default_controller.py
+++ b/mc_backend/openapi/openapi_server/controllers/default_controller.py
@@ -3,6 +3,10 @@ import six
from openapi.openapi_server.models.annis_response import AnnisResponse # noqa: E501
from openapi.openapi_server.models.corpus import Corpus # noqa: E501
+from openapi.openapi_server.models.exercise import Exercise # noqa: E501
+from openapi.openapi_server.models.file_type import FileType # noqa: E501
+from openapi.openapi_server.models.frequency_item import FrequencyItem # noqa: E501
+from openapi.openapi_server.models.static_exercise import StaticExercise # noqa: E501
from openapi.openapi_server import util
@@ -88,12 +92,84 @@ def mcserver_app_api_exercise_api_post(): # noqa: E501
return 'do some magic!'
+def mcserver_app_api_exercise_list_api_get(lang, frequency_upper_bound=None, last_update_time=None, vocabulary=None): # noqa: E501
+ """Provides metadata for all available exercises.
+
+ # noqa: E501
+
+ :param lang: ISO 639-1 Language Code for the localization of exercise content.
+ :type lang: str
+ :param frequency_upper_bound: Upper bound for reference vocabulary frequency.
+ :type frequency_upper_bound: int
+ :param last_update_time: Time (in milliseconds) of the last update.
+ :type last_update_time: int
+ :param vocabulary: Identifier for a reference vocabulary.
+ :type vocabulary: str
+
+ :rtype: Exercise
+ """
+ return 'do some magic!'
+
+
+def mcserver_app_api_file_api_get(id, type, solution_indices=None): # noqa: E501
+ """Provides the URL to download a specific file.
+
+ # noqa: E501
+
+ :param id: Unique identifier (UUID) for an exercise.
+ :type id: str
+ :param type: File format for the requested download.
+ :type type: dict | bytes
+ :param solution_indices: Indices for the solutions that should be included in the download.
+ :type solution_indices: List[int]
+
+ :rtype: Exercise
+ """
+ if connexion.request.is_json:
+ type = FileType.from_dict(connexion.request.get_json()) # noqa: E501
+ return 'do some magic!'
+
+
+def mcserver_app_api_file_api_post(file_type=None, html_content=None, learning_result=None, urn=None): # noqa: E501
+ """Serializes and persists learning results or HTML content for later access.
+
+ # noqa: E501
+
+ :param file_type:
+ :type file_type: dict | bytes
+ :param html_content: HTML content to be serialized.
+ :type html_content: str
+ :param learning_result: Serialized xAPI results for an interactive exercise.
+ :type learning_result: str
+ :param urn: CTS URN for the text passage from which the HTML content was created.
+ :type urn: str
+
+ :rtype: str
+ """
+ if connexion.request.is_json:
+ file_type = FileType.from_dict(connexion.request.get_json()) # noqa: E501
+ return 'do some magic!'
+
+
+def mcserver_app_api_frequency_api_get(urn): # noqa: E501
+ """Returns results for a frequency query from ANNIS for a given CTS URN.
+
+ # noqa: E501
+
+ :param urn: CTS URN for referencing the corpus.
+ :type urn: str
+
+ :rtype: List[FrequencyItem]
+ """
+ return 'do some magic!'
+
+
def mcserver_app_api_static_exercises_api_get(): # noqa: E501
"""Returns metadata for static exercises.
# noqa: E501
- :rtype: object
+ :rtype: Dict[str, StaticExercise]
"""
return 'do some magic!'
diff --git a/mc_backend/openapi/openapi_server/models/__init__.py b/mc_backend/openapi/openapi_server/models/__init__.py
index 8bcc862..09dc551 100644
--- a/mc_backend/openapi/openapi_server/models/__init__.py
+++ b/mc_backend/openapi/openapi_server/models/__init__.py
@@ -10,12 +10,14 @@ from openapi.openapi_server.models.exercise_all_of import ExerciseAllOf
from openapi.openapi_server.models.exercise_base import ExerciseBase
from openapi.openapi_server.models.exercise_form import ExerciseForm
from openapi.openapi_server.models.exercise_form_all_of import ExerciseFormAllOf
+from openapi.openapi_server.models.file_type import FileType
from openapi.openapi_server.models.frequency_item import FrequencyItem
from openapi.openapi_server.models.graph_data import GraphData
-from openapi.openapi_server.models.learning_result import LearningResult
+from openapi.openapi_server.models.inline_object import InlineObject
from openapi.openapi_server.models.link import Link
from openapi.openapi_server.models.node_mc import NodeMC
+from openapi.openapi_server.models.phenomenon import Phenomenon
from openapi.openapi_server.models.solution import Solution
from openapi.openapi_server.models.solution_element import SolutionElement
+from openapi.openapi_server.models.static_exercise import StaticExercise
from openapi.openapi_server.models.text_complexity import TextComplexity
-from openapi.openapi_server.models.update_info import UpdateInfo
diff --git a/mc_backend/openapi/openapi_server/models/file_type.py b/mc_backend/openapi/openapi_server/models/file_type.py
new file mode 100644
index 0000000..aeeb56b
--- /dev/null
+++ b/mc_backend/openapi/openapi_server/models/file_type.py
@@ -0,0 +1,44 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from openapi.openapi_server.models.base_model_ import Model
+from openapi.openapi_server import util
+
+
+class FileType(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ """
+ allowed enum values
+ """
+ DOCX = "docx"
+ JSON = "json"
+ PDF = "pdf"
+ XML = "xml"
+ def __init__(self): # noqa: E501
+ """FileType - a model defined in OpenAPI
+
+ """
+ self.openapi_types = {
+ }
+
+ self.attribute_map = {
+ }
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'FileType':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The FileType of this FileType. # noqa: E501
+ :rtype: FileType
+ """
+ return util.deserialize_model(dikt, cls)
diff --git a/mc_backend/openapi/openapi_server/models/frequency_item.py b/mc_backend/openapi/openapi_server/models/frequency_item.py
index 86c6b7e..8a61b35 100644
--- a/mc_backend/openapi/openapi_server/models/frequency_item.py
+++ b/mc_backend/openapi/openapi_server/models/frequency_item.py
@@ -6,8 +6,10 @@ from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi.openapi_server.models.base_model_ import Model
+from openapi.openapi_server.models.phenomenon import Phenomenon
from openapi.openapi_server import util
+from openapi.openapi_server.models.phenomenon import Phenomenon # noqa: E501
class FrequencyItem(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -21,13 +23,13 @@ class FrequencyItem(Model):
:param count: The count of this FrequencyItem. # noqa: E501
:type count: int
:param phenomena: The phenomena of this FrequencyItem. # noqa: E501
- :type phenomena: List[str]
+ :type phenomena: List[Phenomenon]
:param values: The values of this FrequencyItem. # noqa: E501
:type values: List[str]
"""
self.openapi_types = {
'count': int,
- 'phenomena': List[str],
+ 'phenomena': List[Phenomenon],
'values': List[str]
}
@@ -79,10 +81,10 @@ class FrequencyItem(Model):
def phenomena(self):
"""Gets the phenomena of this FrequencyItem.
- Labels for the phenomena described in this frequency entry. # noqa: E501
+ Labels for the linguistic phenomena described in this frequency entry. # noqa: E501
:return: The phenomena of this FrequencyItem.
- :rtype: List[str]
+ :rtype: List[Phenomenon]
"""
return self._phenomena
@@ -90,10 +92,10 @@ class FrequencyItem(Model):
def phenomena(self, phenomena):
"""Sets the phenomena of this FrequencyItem.
- Labels for the phenomena described in this frequency entry. # noqa: E501
+ Labels for the linguistic phenomena described in this frequency entry. # noqa: E501
:param phenomena: The phenomena of this FrequencyItem.
- :type phenomena: List[str]
+ :type phenomena: List[Phenomenon]
"""
self._phenomena = phenomena
diff --git a/mc_backend/openapi/openapi_server/models/inline_object.py b/mc_backend/openapi/openapi_server/models/inline_object.py
new file mode 100644
index 0000000..8e3c40d
--- /dev/null
+++ b/mc_backend/openapi/openapi_server/models/inline_object.py
@@ -0,0 +1,150 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from openapi.openapi_server.models.base_model_ import Model
+from openapi.openapi_server.models.file_type import FileType
+from openapi.openapi_server import util
+
+from openapi.openapi_server.models.file_type import FileType # noqa: E501
+
+class InlineObject(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, file_type=None, html_content=None, learning_result=None, urn=None): # noqa: E501
+ """InlineObject - a model defined in OpenAPI
+
+ :param file_type: The file_type of this InlineObject. # noqa: E501
+ :type file_type: FileType
+ :param html_content: The html_content of this InlineObject. # noqa: E501
+ :type html_content: str
+ :param learning_result: The learning_result of this InlineObject. # noqa: E501
+ :type learning_result: str
+ :param urn: The urn of this InlineObject. # noqa: E501
+ :type urn: str
+ """
+ self.openapi_types = {
+ 'file_type': FileType,
+ 'html_content': str,
+ 'learning_result': str,
+ 'urn': str
+ }
+
+ self.attribute_map = {
+ 'file_type': 'file_type',
+ 'html_content': 'html_content',
+ 'learning_result': 'learning_result',
+ 'urn': 'urn'
+ }
+
+ self._file_type = file_type
+ self._html_content = html_content
+ self._learning_result = learning_result
+ self._urn = urn
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'InlineObject':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The inline_object of this InlineObject. # noqa: E501
+ :rtype: InlineObject
+ """
+ return util.deserialize_model(dikt, cls)
+
+ @property
+ def file_type(self):
+ """Gets the file_type of this InlineObject.
+
+
+ :return: The file_type of this InlineObject.
+ :rtype: FileType
+ """
+ return self._file_type
+
+ @file_type.setter
+ def file_type(self, file_type):
+ """Sets the file_type of this InlineObject.
+
+
+ :param file_type: The file_type of this InlineObject.
+ :type file_type: FileType
+ """
+
+ self._file_type = file_type
+
+ @property
+ def html_content(self):
+ """Gets the html_content of this InlineObject.
+
+ HTML content to be serialized. # noqa: E501
+
+ :return: The html_content of this InlineObject.
+ :rtype: str
+ """
+ return self._html_content
+
+ @html_content.setter
+ def html_content(self, html_content):
+ """Sets the html_content of this InlineObject.
+
+ HTML content to be serialized. # noqa: E501
+
+ :param html_content: The html_content of this InlineObject.
+ :type html_content: str
+ """
+
+ self._html_content = html_content
+
+ @property
+ def learning_result(self):
+ """Gets the learning_result of this InlineObject.
+
+ Serialized xAPI results for an interactive exercise. # noqa: E501
+
+ :return: The learning_result of this InlineObject.
+ :rtype: str
+ """
+ return self._learning_result
+
+ @learning_result.setter
+ def learning_result(self, learning_result):
+ """Sets the learning_result of this InlineObject.
+
+ Serialized xAPI results for an interactive exercise. # noqa: E501
+
+ :param learning_result: The learning_result of this InlineObject.
+ :type learning_result: str
+ """
+
+ self._learning_result = learning_result
+
+ @property
+ def urn(self):
+ """Gets the urn of this InlineObject.
+
+ CTS URN for the text passage from which the HTML content was created. # noqa: E501
+
+ :return: The urn of this InlineObject.
+ :rtype: str
+ """
+ return self._urn
+
+ @urn.setter
+ def urn(self, urn):
+ """Sets the urn of this InlineObject.
+
+ CTS URN for the text passage from which the HTML content was created. # noqa: E501
+
+ :param urn: The urn of this InlineObject.
+ :type urn: str
+ """
+
+ self._urn = urn
diff --git a/mc_backend/openapi/openapi_server/models/inline_response200.py b/mc_backend/openapi/openapi_server/models/inline_response200.py
index 180b848..9adccfa 100644
--- a/mc_backend/openapi/openapi_server/models/inline_response200.py
+++ b/mc_backend/openapi/openapi_server/models/inline_response200.py
@@ -6,18 +6,8 @@ from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi.openapi_server.models.base_model_ import Model
-from openapi.openapi_server.models.inline_response200_frequency_analysis import InlineResponse200FrequencyAnalysis
-from openapi.openapi_server.models.inline_response200_text_complexity import InlineResponse200TextComplexity
-from openapi.openapi_server.models.link import Link
-from openapi.openapi_server.models.node import Node
-from openapi.openapi_server.models.solution import Solution
from openapi.openapi_server import util
-from openapi.openapi_server.models.inline_response200_frequency_analysis import InlineResponse200FrequencyAnalysis # noqa: E501
-from openapi.openapi_server.models.inline_response200_text_complexity import InlineResponse200TextComplexity # noqa: E501
-from openapi.openapi_server.models.link import Link # noqa: E501
-from openapi.openapi_server.models.node import Node # noqa: E501
-from openapi.openapi_server.models.solution import Solution # noqa: E501
class InlineResponse200(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
@@ -25,71 +15,26 @@ class InlineResponse200(Model):
Do not edit the class manually.
"""
- def __init__(self, directed=None, exercise_id=None, exercise_type=None, frequency_analysis=None, graph=None, links=None, multigraph=None, nodes=None, solutions=None, text_complexity=None, uri=None): # noqa: E501
+ def __init__(self, solutions=None, urn=None): # noqa: E501
"""InlineResponse200 - a model defined in OpenAPI
- :param directed: The directed of this InlineResponse200. # noqa: E501
- :type directed: bool
- :param exercise_id: The exercise_id of this InlineResponse200. # noqa: E501
- :type exercise_id: str
- :param exercise_type: The exercise_type of this InlineResponse200. # noqa: E501
- :type exercise_type: str
- :param frequency_analysis: The frequency_analysis of this InlineResponse200. # noqa: E501
- :type frequency_analysis: List[InlineResponse200FrequencyAnalysis]
- :param graph: The graph of this InlineResponse200. # noqa: E501
- :type graph: object
- :param links: The links of this InlineResponse200. # noqa: E501
- :type links: List[Link]
- :param multigraph: The multigraph of this InlineResponse200. # noqa: E501
- :type multigraph: bool
- :param nodes: The nodes of this InlineResponse200. # noqa: E501
- :type nodes: List[Node]
:param solutions: The solutions of this InlineResponse200. # noqa: E501
- :type solutions: List[Solution]
- :param text_complexity: The text_complexity of this InlineResponse200. # noqa: E501
- :type text_complexity: InlineResponse200TextComplexity
- :param uri: The uri of this InlineResponse200. # noqa: E501
- :type uri: str
+ :type solutions: List[List[str]]
+ :param urn: The urn of this InlineResponse200. # noqa: E501
+ :type urn: str
"""
self.openapi_types = {
- 'directed': bool,
- 'exercise_id': str,
- 'exercise_type': str,
- 'frequency_analysis': List[InlineResponse200FrequencyAnalysis],
- 'graph': object,
- 'links': List[Link],
- 'multigraph': bool,
- 'nodes': List[Node],
- 'solutions': List[Solution],
- 'text_complexity': InlineResponse200TextComplexity,
- 'uri': str
+ 'solutions': List[List[str]],
+ 'urn': str
}
self.attribute_map = {
- 'directed': 'directed',
- 'exercise_id': 'exercise_id',
- 'exercise_type': 'exercise_type',
- 'frequency_analysis': 'frequency_analysis',
- 'graph': 'graph',
- 'links': 'links',
- 'multigraph': 'multigraph',
- 'nodes': 'nodes',
'solutions': 'solutions',
- 'text_complexity': 'text_complexity',
- 'uri': 'uri'
+ 'urn': 'urn'
}
- self._directed = directed
- self._exercise_id = exercise_id
- self._exercise_type = exercise_type
- self._frequency_analysis = frequency_analysis
- self._graph = graph
- self._links = links
- self._multigraph = multigraph
- self._nodes = nodes
self._solutions = solutions
- self._text_complexity = text_complexity
- self._uri = uri
+ self._urn = urn
@classmethod
def from_dict(cls, dikt) -> 'InlineResponse200':
@@ -102,198 +47,14 @@ class InlineResponse200(Model):
"""
return util.deserialize_model(dikt, cls)
- @property
- def directed(self):
- """Gets the directed of this InlineResponse200.
-
- Whether edges in the returned graph are directed. # noqa: E501
-
- :return: The directed of this InlineResponse200.
- :rtype: bool
- """
- return self._directed
-
- @directed.setter
- def directed(self, directed):
- """Sets the directed of this InlineResponse200.
-
- Whether edges in the returned graph are directed. # noqa: E501
-
- :param directed: The directed of this InlineResponse200.
- :type directed: bool
- """
-
- self._directed = directed
-
- @property
- def exercise_id(self):
- """Gets the exercise_id of this InlineResponse200.
-
- Unique identifier (UUID) for the exercise. # noqa: E501
-
- :return: The exercise_id of this InlineResponse200.
- :rtype: str
- """
- return self._exercise_id
-
- @exercise_id.setter
- def exercise_id(self, exercise_id):
- """Sets the exercise_id of this InlineResponse200.
-
- Unique identifier (UUID) for the exercise. # noqa: E501
-
- :param exercise_id: The exercise_id of this InlineResponse200.
- :type exercise_id: str
- """
-
- self._exercise_id = exercise_id
-
- @property
- def exercise_type(self):
- """Gets the exercise_type of this InlineResponse200.
-
- Type of exercise, concerning interaction and layout. # noqa: E501
-
- :return: The exercise_type of this InlineResponse200.
- :rtype: str
- """
- return self._exercise_type
-
- @exercise_type.setter
- def exercise_type(self, exercise_type):
- """Sets the exercise_type of this InlineResponse200.
-
- Type of exercise, concerning interaction and layout. # noqa: E501
-
- :param exercise_type: The exercise_type of this InlineResponse200.
- :type exercise_type: str
- """
-
- self._exercise_type = exercise_type
-
- @property
- def frequency_analysis(self):
- """Gets the frequency_analysis of this InlineResponse200.
-
- List of items with frequency data for linguistic phenomena. # noqa: E501
-
- :return: The frequency_analysis of this InlineResponse200.
- :rtype: List[InlineResponse200FrequencyAnalysis]
- """
- return self._frequency_analysis
-
- @frequency_analysis.setter
- def frequency_analysis(self, frequency_analysis):
- """Sets the frequency_analysis of this InlineResponse200.
-
- List of items with frequency data for linguistic phenomena. # noqa: E501
-
- :param frequency_analysis: The frequency_analysis of this InlineResponse200.
- :type frequency_analysis: List[InlineResponse200FrequencyAnalysis]
- """
-
- self._frequency_analysis = frequency_analysis
-
- @property
- def graph(self):
- """Gets the graph of this InlineResponse200.
-
- Additional graph data. # noqa: E501
-
- :return: The graph of this InlineResponse200.
- :rtype: object
- """
- return self._graph
-
- @graph.setter
- def graph(self, graph):
- """Sets the graph of this InlineResponse200.
-
- Additional graph data. # noqa: E501
-
- :param graph: The graph of this InlineResponse200.
- :type graph: object
- """
-
- self._graph = graph
-
- @property
- def links(self):
- """Gets the links of this InlineResponse200.
-
- List of edges for the graph. # noqa: E501
-
- :return: The links of this InlineResponse200.
- :rtype: List[Link]
- """
- return self._links
-
- @links.setter
- def links(self, links):
- """Sets the links of this InlineResponse200.
-
- List of edges for the graph. # noqa: E501
-
- :param links: The links of this InlineResponse200.
- :type links: List[Link]
- """
-
- self._links = links
-
- @property
- def multigraph(self):
- """Gets the multigraph of this InlineResponse200.
-
- Whether the graph consists of multiple subgraphs. # noqa: E501
-
- :return: The multigraph of this InlineResponse200.
- :rtype: bool
- """
- return self._multigraph
-
- @multigraph.setter
- def multigraph(self, multigraph):
- """Sets the multigraph of this InlineResponse200.
-
- Whether the graph consists of multiple subgraphs. # noqa: E501
-
- :param multigraph: The multigraph of this InlineResponse200.
- :type multigraph: bool
- """
-
- self._multigraph = multigraph
-
- @property
- def nodes(self):
- """Gets the nodes of this InlineResponse200.
-
- List of nodes for the graph. # noqa: E501
-
- :return: The nodes of this InlineResponse200.
- :rtype: List[Node]
- """
- return self._nodes
-
- @nodes.setter
- def nodes(self, nodes):
- """Sets the nodes of this InlineResponse200.
-
- List of nodes for the graph. # noqa: E501
-
- :param nodes: The nodes of this InlineResponse200.
- :type nodes: List[Node]
- """
-
- self._nodes = nodes
-
@property
def solutions(self):
"""Gets the solutions of this InlineResponse200.
- Correct solutions for this exercise. # noqa: E501
+ Solutions for the exercise. # noqa: E501
:return: The solutions of this InlineResponse200.
- :rtype: List[Solution]
+ :rtype: List[List[str]]
"""
return self._solutions
@@ -301,54 +62,33 @@ class InlineResponse200(Model):
def solutions(self, solutions):
"""Sets the solutions of this InlineResponse200.
- Correct solutions for this exercise. # noqa: E501
+ Solutions for the exercise. # noqa: E501
:param solutions: The solutions of this InlineResponse200.
- :type solutions: List[Solution]
+ :type solutions: List[List[str]]
"""
self._solutions = solutions
@property
- def text_complexity(self):
- """Gets the text_complexity of this InlineResponse200.
-
-
- :return: The text_complexity of this InlineResponse200.
- :rtype: InlineResponse200TextComplexity
- """
- return self._text_complexity
-
- @text_complexity.setter
- def text_complexity(self, text_complexity):
- """Sets the text_complexity of this InlineResponse200.
-
-
- :param text_complexity: The text_complexity of this InlineResponse200.
- :type text_complexity: InlineResponse200TextComplexity
- """
-
- self._text_complexity = text_complexity
-
- @property
- def uri(self):
- """Gets the uri of this InlineResponse200.
+ def urn(self):
+ """Gets the urn of this InlineResponse200.
- URI for accessing the exercise in this API. # noqa: E501
+ CTS URN for the text passage from which the exercise was created. # noqa: E501
- :return: The uri of this InlineResponse200.
+ :return: The urn of this InlineResponse200.
:rtype: str
"""
- return self._uri
+ return self._urn
- @uri.setter
- def uri(self, uri):
- """Sets the uri of this InlineResponse200.
+ @urn.setter
+ def urn(self, urn):
+ """Sets the urn of this InlineResponse200.
- URI for accessing the exercise in this API. # noqa: E501
+ CTS URN for the text passage from which the exercise was created. # noqa: E501
- :param uri: The uri of this InlineResponse200.
- :type uri: str
+ :param urn: The urn of this InlineResponse200.
+ :type urn: str
"""
- self._uri = uri
+ self._urn = urn
diff --git a/mc_backend/openapi/openapi_server/models/phenomenon.py b/mc_backend/openapi/openapi_server/models/phenomenon.py
new file mode 100644
index 0000000..0fad63b
--- /dev/null
+++ b/mc_backend/openapi/openapi_server/models/phenomenon.py
@@ -0,0 +1,44 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from openapi.openapi_server.models.base_model_ import Model
+from openapi.openapi_server import util
+
+
+class Phenomenon(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ """
+ allowed enum values
+ """
+ DEPENDENCY = "dependency"
+ FEATS = "feats"
+ LEMMA = "lemma"
+ UPOSTAG = "upostag"
+ def __init__(self): # noqa: E501
+ """Phenomenon - a model defined in OpenAPI
+
+ """
+ self.openapi_types = {
+ }
+
+ self.attribute_map = {
+ }
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'Phenomenon':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The Phenomenon of this Phenomenon. # noqa: E501
+ :rtype: Phenomenon
+ """
+ return util.deserialize_model(dikt, cls)
diff --git a/mc_backend/openapi/openapi_server/models/static_exercise.py b/mc_backend/openapi/openapi_server/models/static_exercise.py
new file mode 100644
index 0000000..6e1687f
--- /dev/null
+++ b/mc_backend/openapi/openapi_server/models/static_exercise.py
@@ -0,0 +1,94 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from datetime import date, datetime # noqa: F401
+
+from typing import List, Dict # noqa: F401
+
+from openapi.openapi_server.models.base_model_ import Model
+from openapi.openapi_server import util
+
+
+class StaticExercise(Model):
+ """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+
+ Do not edit the class manually.
+ """
+
+ def __init__(self, solutions=None, urn=None): # noqa: E501
+ """StaticExercise - a model defined in OpenAPI
+
+ :param solutions: The solutions of this StaticExercise. # noqa: E501
+ :type solutions: List[List[str]]
+ :param urn: The urn of this StaticExercise. # noqa: E501
+ :type urn: str
+ """
+ self.openapi_types = {
+ 'solutions': List[List[str]],
+ 'urn': str
+ }
+
+ self.attribute_map = {
+ 'solutions': 'solutions',
+ 'urn': 'urn'
+ }
+
+ self._solutions = solutions
+ self._urn = urn
+
+ @classmethod
+ def from_dict(cls, dikt) -> 'StaticExercise':
+ """Returns the dict as a model
+
+ :param dikt: A dict.
+ :type: dict
+ :return: The StaticExercise of this StaticExercise. # noqa: E501
+ :rtype: StaticExercise
+ """
+ return util.deserialize_model(dikt, cls)
+
+ @property
+ def solutions(self):
+ """Gets the solutions of this StaticExercise.
+
+ Solutions for the exercise. # noqa: E501
+
+ :return: The solutions of this StaticExercise.
+ :rtype: List[List[str]]
+ """
+ return self._solutions
+
+ @solutions.setter
+ def solutions(self, solutions):
+ """Sets the solutions of this StaticExercise.
+
+ Solutions for the exercise. # noqa: E501
+
+ :param solutions: The solutions of this StaticExercise.
+ :type solutions: List[List[str]]
+ """
+
+ self._solutions = solutions
+
+ @property
+ def urn(self):
+ """Gets the urn of this StaticExercise.
+
+ CTS URN for the text passage from which the exercise was created. # noqa: E501
+
+ :return: The urn of this StaticExercise.
+ :rtype: str
+ """
+ return self._urn
+
+ @urn.setter
+ def urn(self, urn):
+ """Sets the urn of this StaticExercise.
+
+ CTS URN for the text passage from which the exercise was created. # noqa: E501
+
+ :param urn: The urn of this StaticExercise.
+ :type urn: str
+ """
+
+ self._urn = urn
diff --git a/mc_backend/openapi/openapi_server/openapi/openapi.yaml b/mc_backend/openapi/openapi_server/openapi/openapi.yaml
index c0dac33..0e95c93 100644
--- a/mc_backend/openapi/openapi_server/openapi/openapi.yaml
+++ b/mc_backend/openapi/openapi_server/openapi/openapi.yaml
@@ -159,7 +159,170 @@ paths:
description: Exercise data object
summary: Creates a new exercise.
x-openapi-router-controller: openapi_server.controllers.default_controller
- /exercises:
+ /exerciseList:
+ get:
+ operationId: mcserver_app_api_exercise_list_api_get
+ parameters:
+ - description: ISO 639-1 Language Code for the localization of exercise content.
+ explode: true
+ in: query
+ name: lang
+ required: true
+ schema:
+ example: en
+ type: string
+ style: form
+ - description: Upper bound for reference vocabulary frequency.
+ explode: true
+ in: query
+ name: frequency_upper_bound
+ required: false
+ schema:
+ default: 0
+ example: 500
+ type: integer
+ style: form
+ - description: Time (in milliseconds) of the last update.
+ explode: true
+ in: query
+ name: last_update_time
+ required: false
+ schema:
+ default: 0
+ example: 123456789
+ type: integer
+ style: form
+ - description: Identifier for a reference vocabulary.
+ explode: true
+ in: query
+ name: vocabulary
+ required: false
+ schema:
+ enum:
+ - agldt
+ - bws
+ - proiel
+ - viva
+ example: agldt
+ type: string
+ style: form
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Exercise'
+ description: Data for interactive exercises, excluding the linguistic details.
+ summary: Provides metadata for all available exercises.
+ x-openapi-router-controller: openapi_server.controllers.default_controller
+ /file:
+ get:
+ operationId: mcserver_app_api_file_api_get
+ parameters:
+ - description: Unique identifier (UUID) for an exercise.
+ explode: true
+ in: query
+ name: id
+ required: true
+ schema:
+ example: 12345678-1234-5678-1234-567812345678
+ type: string
+ style: form
+ - description: File format for the requested download.
+ explode: true
+ in: query
+ name: type
+ required: true
+ schema:
+ $ref: '#/components/schemas/FileType'
+ style: form
+ - description: Indices for the solutions that should be included in the download.
+ explode: true
+ in: query
+ name: solution_indices
+ required: false
+ schema:
+ default: []
+ items:
+ example: 0
+ type: integer
+ type: array
+ style: form
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/Exercise'
+ description: Data for interactive exercises, excluding the linguistic details.
+ summary: Provides the URL to download a specific file.
+ x-openapi-router-controller: openapi_server.controllers.default_controller
+ post:
+ operationId: mcserver_app_api_file_api_post
+ requestBody:
+ $ref: '#/components/requestBodies/inline_object'
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ description: Data that should be serialized and persisted.
+ properties:
+ file_type:
+ $ref: '#/components/schemas/FileType'
+ html_content:
+ description: HTML content to be serialized.
+ example:
+ type: string
+ learning_result:
+ description: Serialized XAPI results for an interactive exercise.
+ example: '{''0'': {}}'
+ type: string
+ urn:
+ description: CTS URN for the text passage from which the HTML content
+ was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ type: string
+ type: object
+ x-body-name: file_data
+ required: true
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ description: Indication of success, or name of the file that was generated.
+ example: 12345678-1234-5678-1234-567812345678.pdf
+ type: string
+ description: Indication of success, possibly a reference to the resulting
+ file.
+ summary: Serializes and persists learning results or HTML content for later
+ access.
+ x-openapi-router-controller: openapi_server.controllers.default_controller
+ /frequency:
+ get:
+ operationId: mcserver_app_api_frequency_api_get
+ parameters:
+ - description: CTS URN for referencing the corpus.
+ explode: true
+ in: query
+ name: urn
+ required: true
+ schema:
+ example: urn:cts:latinLit:phi1254.phi001.perseus-lat2:5.6.21-5.6.21
+ type: string
+ style: form
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ description: List of items with frequency data for linguistic phenomena.
+ items:
+ $ref: '#/components/schemas/FrequencyItem'
+ type: array
+ description: Frequency analysis, i.e. a list of frequency items.
+ summary: Returns results for a frequency query from ANNIS for a given CTS URN.
+ x-openapi-router-controller: openapi_server.controllers.default_controller
+ /staticExercises:
get:
operationId: mcserver_app_api_static_exercises_api_get
responses:
@@ -167,24 +330,94 @@ paths:
content:
application/json:
schema:
+ additionalProperties:
+ $ref: '#/components/schemas/StaticExercise'
type: object
- description: Metadata for static exercises, including their respective URIs
+ description: Metadata for static exercises, mapped to their respective URIs
in the frontend.
summary: Returns metadata for static exercises.
x-openapi-router-controller: openapi_server.controllers.default_controller
components:
+ requestBodies:
+ inline_object:
+ content:
+ application/x-www-form-urlencoded:
+ schema:
+ $ref: '#/components/schemas/inline_object'
+ required: true
schemas:
+ Corpus:
+ description: Collection of texts.
+ example:
+ citation_level_3: Section
+ author: Aulus Gellius
+ source_urn: urn:cts:latinLit:phi1254.phi001.perseus-lat2
+ title: Noctes Atticae
+ citation_level_1: Book
+ cid: 1
+ citation_level_2: Chapter
+ properties:
+ author:
+ default: Anonymus
+ description: Author of the texts in the corpus.
+ example: Aulus Gellius
+ nullable: false
+ type: string
+ cid:
+ description: Unique identifier for the corpus.
+ example: 1
+ type: integer
+ x-primary-key: true
+ x-autoincrement: true
+ citation_level_1:
+ default: default
+ description: First level for citing the corpus.
+ example: Book
+ type: string
+ citation_level_2:
+ default: default
+ description: Second level for citing the corpus.
+ example: Chapter
+ type: string
+ citation_level_3:
+ default: default
+ description: Third level for citing the corpus.
+ example: Section
+ type: string
+ source_urn:
+ description: CTS base URN for referencing the corpus.
+ example: urn:cts:latinLit:phi1254.phi001.perseus-lat2
+ type: string
+ x-unique: true
+ title:
+ default: Anonymus
+ description: Corpus title.
+ example: Noctes Atticae
+ nullable: false
+ type: string
+ required:
+ - source_urn
+ type: object
+ x-tablename: Corpus
AnnisResponse:
description: A response with graph data from ANNIS, possibly with additional
data for exercises.
example:
frequency_analysis:
- - values: []
+ - values:
+ - values
+ - values
count: 1
- phenomena: []
- - values: []
+ phenomena:
+ - upostag
+ - upostag
+ - values:
+ - values
+ - values
count: 1
- phenomena: []
+ phenomena:
+ - upostag
+ - upostag
text_complexity:
all: 42.31
n_abl_abs: 1
@@ -291,137 +524,42 @@ components:
example: /mc/api/v1.0/file/fd97630c-1f5a-4102-af56-20eb0babdfee
type: string
type: object
- Corpus:
- description: Collection of texts.
- example:
- citation_level_3: Section
- author: Aulus Gellius
- source_urn: urn:cts:latinLit:phi1254.phi001.perseus-lat2
- title: Noctes Atticae
- citation_level_1: Book
- cid: 1
- citation_level_2: Chapter
- properties:
- author:
- default: Anonymus
- description: Author of the texts in the corpus.
- example: Aulus Gellius
- nullable: false
- type: string
- cid:
- description: Unique identifier for the corpus.
- example: 1
- type: integer
- x-primary-key: true
- x-autoincrement: true
- citation_level_1:
- default: default
- description: First level for citing the corpus.
- example: Book
- type: string
- citation_level_2:
- default: default
- description: Second level for citing the corpus.
- example: Chapter
- type: string
- citation_level_3:
- default: default
- description: Third level for citing the corpus.
- example: Section
- type: string
- source_urn:
- description: CTS base URN for referencing the corpus.
- example: urn:cts:latinLit:phi1254.phi001.perseus-lat2
- type: string
- x-unique: true
- title:
- default: Anonymus
- description: Corpus title.
- example: Noctes Atticae
- nullable: false
- type: string
- required:
- - source_urn
- type: object
- x-tablename: Corpus
- Exercise:
- allOf:
- - $ref: '#/components/schemas/ExerciseBase'
- - $ref: '#/components/schemas/Exercise_allOf'
- ExerciseBase:
- description: Base data for creating and evaluating interactive exercises.
- properties:
- correct_feedback:
- default: ""
- description: Feedback for successful completion of the exercise.
- example: Well done!
- type: string
- general_feedback:
- default: ""
- description: Feedback for finishing the exercise.
- example: You have finished the exercise.
- type: string
- incorrect_feedback:
- default: ""
- description: Feedback for failing to complete the exercise successfully.
- example: Unfortunately, that answer is wrong.
- type: string
- instructions:
- default: ""
- description: Hints for how to complete the exercise.
- example: Fill in the gaps!
- type: string
- partially_correct_feedback:
- default: ""
- description: Feedback for successfully completing certain parts of the exercise.
- example: Some parts of this answer are correct.
- type: string
- search_values:
- default: '[]'
- description: Search queries that were used to build the exercise.
- example: '[''upostag=noun'', ''dependency=object'']'
- type: string
- work_author:
- default: ""
- description: Name of the person who wrote the base text for the exercise.
- example: C. Iulius Caesar
- type: string
- work_title:
- default: ""
- description: Title of the base text for the exercise.
- example: Noctes Atticae
- type: string
- required:
- - instructions
- - search_values
- type: object
- ExerciseForm:
- allOf:
- - $ref: '#/components/schemas/ExerciseBase'
- - $ref: '#/components/schemas/ExerciseForm_allOf'
FrequencyItem:
example:
- values: []
+ values:
+ - values
+ - values
count: 1
- phenomena: []
+ phenomena:
+ - upostag
+ - upostag
properties:
count:
description: How often the given combination of values occurred.
example: 1
type: integer
phenomena:
- description: Labels for the phenomena described in this frequency entry.
- example: []
+ description: Labels for the linguistic phenomena described in this frequency
+ entry.
items:
- type: string
+ $ref: '#/components/schemas/Phenomenon'
type: array
values:
description: Values for the phenomena described in this frequency entry.
- example: []
items:
type: string
type: array
type: object
+ Phenomenon:
+ description: 'Linguistic phenomena: syntactic dependencies, morphological features,
+ lemmata, parts of speech.'
+ enum:
+ - dependency
+ - feats
+ - lemma
+ - upostag
+ example: upostag
+ type: string
GraphData:
description: Nodes, edges and metadata for a graph.
example:
@@ -489,134 +627,6 @@ components:
- links
- nodes
type: object
- LearningResult:
- description: Learner data for completed exercises.
- properties:
- actor_account_name:
- default: ""
- description: H5P user ID, usually unique per device.
- example: ebea3f3e-7410-4215-b34d-c1417f7c7c18
- type: string
- actor_object_type:
- default: ""
- description: Describes the kind of object that was recognized as actor.
- example: Agent
- type: string
- category_id:
- default: ""
- description: Link to the exercise type specification.
- example: http://h5p.org/libraries/H5P.MarkTheWords-1.9
- type: string
- category_object_type:
- default: ""
- description: Describes the kind of object that was recognized as exercise.
- example: Activity
- type: string
- choices:
- default: '[]'
- description: JSON string containing a list of possible choices, each with
- ID and description.
- example: |-
- [{'id':'2','description':{'en-US':'Quintus ist bei allen in der Provinz beliebt.
- '}},{'id':'3','description':{'en-US':'Asia ist eine unbekannte Provinz.
- '}}]
- type: string
- completion:
- description: Whether the exercise was fully processed or not.
- example: true
- type: boolean
- correct_responses_pattern:
- description: JSON string containing a list of possible solutions to the
- exercise, given as patterns of answers.
- example: '[''0[,]1[,]2'']'
- type: string
- created_time:
- description: When the learner data was received (POSIX timestamp).
- example: 1234567.789
- format: float
- type: number
- x-index: true
- x-primary-key: true
- duration:
- default: PT0S
- description: How many seconds it took a learner to complete the exercise.
- example: PT9.19S
- type: string
- extensions:
- default: '{}'
- description: JSON string containing a mapping of keys and values (usually
- the local content ID, i.e. a versioning mechanism).
- example: '{''http://h5p.org/x-api/h5p-local-content-id'':1}'
- type: string
- interaction_type:
- default: ""
- description: Exercise type.
- example: choice
- type: string
- object_definition_description:
- description: Exercise content, possibly including instructions.
- example: |
- Bestimme die Form von custodem im Satz: Urbs custodem non tyrannum, domus hospitem non expilatorem recepit.
- type: string
- object_definition_type:
- default: ""
- description: Type of object definition that is presented to the user.
- example: http://adlnet.gov/expapi/activities/cmi.interaction
- type: string
- object_object_type:
- default: ""
- description: Type of object that is presented to the user.
- example: Activity
- type: string
- response:
- description: Answer provided by the user, possibly as a pattern.
- example: His in rebus[,]sociis[,]civibus[,]rei publicae
- type: string
- score_max:
- description: Maximum possible score to be achieved in this exercise.
- example: 1
- type: integer
- score_min:
- description: Minimum score to be achieved in this exercise.
- example: 0
- type: integer
- score_raw:
- description: Score that was actually achieved by the user in this exercise.
- example: 1
- type: integer
- score_scaled:
- default: 0
- description: Relative score (between 0 and 1) that was actually achieved
- by the user in this exercise.
- example: 0.8889
- format: float
- type: number
- success:
- description: Whether the exercise was successfully completed or not.
- example: true
- type: boolean
- verb_display:
- default: ""
- description: Type of action that was performed by the user.
- example: answered
- type: string
- verb_id:
- default: ""
- description: Link to the type of action that was performed by the user.
- example: http://adlnet.gov/expapi/verbs/answered
- type: string
- required:
- - completion
- - correct_responses_pattern
- - created_time
- - object_definition_description
- - response
- - score_max
- - score_min
- - score_raw
- - success
- type: object
- x-tablename: LearningResult
Link:
example:
annis_component_name: dep
@@ -838,36 +848,128 @@ components:
example: 1
type: integer
type: object
- UpdateInfo:
- description: Timestamps for updates of various resources.
+ ExerciseForm:
+ allOf:
+ - $ref: '#/components/schemas/ExerciseBase'
+ - $ref: '#/components/schemas/ExerciseForm_allOf'
+ type: object
+ x-body-name: exercise_data
+ ExerciseBase:
+ description: Base data for creating and evaluating interactive exercises.
properties:
- created_time:
- description: When the resource was created (as POSIX timestamp).
- example: 1234567.789
- format: float
- type: number
- x-index: true
- last_modified_time:
- description: When the resource was last modified (as POSIX timestamp).
- example: 1234567.789
- format: float
- type: number
- x-index: true
- resource_type:
- description: Name of the resource for which update timestamps are indexed.
- enum:
- - cts_data
- - exercise_list
- - file_api_clean
- example: cts_data
+ correct_feedback:
+ default: ""
+ description: Feedback for successful completion of the exercise.
+ example: Well done!
+ type: string
+ general_feedback:
+ default: ""
+ description: Feedback for finishing the exercise.
+ example: You have finished the exercise.
+ type: string
+ incorrect_feedback:
+ default: ""
+ description: Feedback for failing to complete the exercise successfully.
+ example: Unfortunately, that answer is wrong.
+ type: string
+ instructions:
+ default: ""
+ description: Hints for how to complete the exercise.
+ example: Fill in the gaps!
+ type: string
+ partially_correct_feedback:
+ default: ""
+ description: Feedback for successfully completing certain parts of the exercise.
+ example: Some parts of this answer are correct.
+ type: string
+ search_values:
+ default: '[]'
+ description: Search queries that were used to build the exercise.
+ example: '[''upostag=noun'', ''dependency=object'']'
+ type: string
+ work_author:
+ default: ""
+ description: Name of the person who wrote the base text for the exercise.
+ example: C. Iulius Caesar
+ type: string
+ work_title:
+ default: ""
+ description: Title of the base text for the exercise.
+ example: Noctes Atticae
type: string
- x-primary-key: true
required:
- - created_time
- - last_modified_time
- - resource_type
+ - instructions
+ - search_values
+ type: object
+ Exercise:
+ allOf:
+ - $ref: '#/components/schemas/ExerciseBase'
+ - $ref: '#/components/schemas/Exercise_allOf'
+ FileType:
+ description: File format for the requested serialization.
+ enum:
+ - docx
+ - json
+ - pdf
+ - xml
+ example: pdf
+ type: string
+ StaticExercise:
+ description: Metadata for a static exercise.
+ properties:
+ solutions:
+ description: Solutions for the exercise.
+ items:
+ description: Single solution, given as tuple of correct response and its
+ lemma.
+ items:
+ description: Correct response or corresponding lemma.
+ example: gaudeas
+ type: string
+ type: array
+ type: array
+ urn:
+ description: CTS URN for the text passage from which the exercise was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ type: string
+ type: object
+ inline_object:
+ description: Data that should be serialized and persisted.
+ properties:
+ file_type:
+ $ref: '#/components/schemas/FileType'
+ html_content:
+ description: HTML content to be serialized.
+ example:
+ type: string
+ learning_result:
+ description: Serialized XAPI results for an interactive exercise.
+ example: '{''0'': {}}'
+ type: string
+ urn:
+ description: CTS URN for the text passage from which the HTML content was
+ created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ type: string
type: object
- x-tablename: UpdateInfo
+ x-body-name: file_data
+ ExerciseForm_allOf:
+ description: Additional exercise data.
+ properties:
+ type:
+ description: Type of exercise, concerning interaction and layout.
+ example: markWords
+ type: string
+ type_translation:
+ description: Localized expression of the exercise type.
+ example: Cloze
+ type: string
+ urn:
+ description: CTS URN for the text passage from which the exercise was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ type: string
+ required:
+ - type
Exercise_allOf:
description: Data for creating and evaluating interactive exercises.
properties:
@@ -930,20 +1032,3 @@ components:
required:
- eid
- last_access_time
- ExerciseForm_allOf:
- description: Additional exercise data.
- properties:
- type:
- description: Type of exercise, concerning interaction and layout.
- example: markWords
- type: string
- type_translation:
- description: Localized expression of the exercise type.
- example: Cloze
- type: string
- urn:
- description: CTS URN for the text passage from which the exercise was created.
- example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
- type: string
- required:
- - type
diff --git a/mc_backend/openapi_models.yaml b/mc_backend/openapi_models.yaml
new file mode 100644
index 0000000..c358f8a
--- /dev/null
+++ b/mc_backend/openapi_models.yaml
@@ -0,0 +1,600 @@
+components:
+ schemas:
+ AnnisResponse:
+ description: A response with graph data from ANNIS, possibly with additional data for exercises.
+ type: object
+ properties:
+ exercise_id:
+ type: string
+ description: Unique identifier (UUID) for the exercise.
+ example: 12345678-1234-5678-1234-567812345678
+ exercise_type:
+ type: string
+ description: Type of exercise, concerning interaction and layout.
+ example: ddwtos
+ frequency_analysis:
+ type: array
+ description: List of items with frequency data for linguistic phenomena.
+ items:
+ $ref: "#/components/schemas/FrequencyItem"
+ graph_data:
+ $ref: "#/components/schemas/GraphData"
+ solutions:
+ type: array
+ description: Correct solutions for this exercise.
+ items:
+ $ref: '#/components/schemas/Solution'
+ text_complexity:
+ $ref: '#/components/schemas/TextComplexity'
+ uri:
+ type: string
+ description: URI for accessing the exercise in this API.
+ example: /mc/api/v1.0/file/fd97630c-1f5a-4102-af56-20eb0babdfee
+ Corpus: # Object definition
+ description: Collection of texts.
+ type: object # Data type
+ x-tablename: Corpus
+ properties:
+ author:
+ type: string
+ description: Author of the texts in the corpus.
+ example: Aulus Gellius
+ default: "Anonymus"
+ nullable: false
+ cid:
+ type: integer
+ description: Unique identifier for the corpus.
+ example: 1
+ x-primary-key: true
+ x-autoincrement: true
+ citation_level_1:
+ type: string
+ description: First level for citing the corpus.
+ example: Book
+ default: default
+ citation_level_2:
+ type: string
+ description: Second level for citing the corpus.
+ example: Chapter
+ default: default
+ citation_level_3:
+ type: string
+ description: Third level for citing the corpus.
+ example: Section
+ default: default
+ source_urn:
+ type: string
+ description: CTS base URN for referencing the corpus.
+ example: urn:cts:latinLit:phi1254.phi001.perseus-lat2
+ x-unique: true
+ title:
+ type: string
+ description: Corpus title.
+ example: Noctes Atticae
+ nullable: false
+ default: Anonymus
+ required:
+ - source_urn
+ Exercise:
+ allOf:
+ - $ref: "#/components/schemas/ExerciseBase"
+ - description: Data for creating and evaluating interactive exercises.
+ type: object # Data type
+ x-tablename: Exercise
+ properties:
+ conll:
+ type: string
+ description: CONLL-formatted linguistic annotations represented as a single string.
+ example: \# newdoc id = ...\n# sent_id = 1\n# text = Caesar fortis est.\n1\tCaesar\tCaeso\tVERB ...
+ default: ""
+ nullable: false
+ eid:
+ type: string
+ description: Unique identifier (UUID) for the exercise.
+ example: 12345678-1234-5678-1234-567812345678
+ x-primary-key: true
+ exercise_type:
+ type: string
+ description: Type of exercise, concerning interaction and layout.
+ example: markWords
+ default: ""
+ nullable: false
+ exercise_type_translation:
+ type: string
+ description: Localized expression of the exercise type.
+ example: Cloze
+ default: ""
+ language:
+ type: string
+ description: ISO 639-1 Language Code for the localization of exercise content.
+ example: en
+ default: de
+ last_access_time:
+ type: number
+ format: float
+ description: When the exercise was last accessed (as POSIX timestamp).
+ example: 1234567.789
+ x-index: true
+ solutions:
+ type: string
+ description: Correct solutions for the exercise.
+ example: "[{'target': {'sentence_id': 1, 'token_id': 7, 'salt_id': 'salt:/urn:...', 'content': 'eo'}, 'value': {'sentence_id': 0, 'token_id': 0, 'content': None, 'salt_id': 'salt:/urn:...'}}]"
+ default: "[]"
+ nullable: false
+ text_complexity:
+ type: number
+ format: float
+ description: Overall text complexity as measured by the software's internal language analysis.
+ example: 54.53
+ default: 0
+ urn:
+ type: string
+ description: CTS URN for the text passage from which the exercise was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ default: ""
+ nullable: false
+ required:
+ - eid
+ - last_access_time
+ ExerciseBase:
+ description: Base data for creating and evaluating interactive exercises.
+ type: object
+ properties:
+ correct_feedback:
+ type: string
+ description: Feedback for successful completion of the exercise.
+ example: Well done!
+ default: ""
+ general_feedback:
+ type: string
+ description: Feedback for finishing the exercise.
+ example: You have finished the exercise.
+ default: ""
+ incorrect_feedback:
+ type: string
+ description: Feedback for failing to complete the exercise successfully.
+ example: Unfortunately, that answer is wrong.
+ default: ""
+ instructions:
+ type: string
+ description: Hints for how to complete the exercise.
+ example: Fill in the gaps!
+ default: ""
+ partially_correct_feedback:
+ type: string
+ description: Feedback for successfully completing certain parts of the exercise.
+ example: Some parts of this answer are correct.
+ default: ""
+ search_values:
+ type: string
+ description: Search queries that were used to build the exercise.
+ example: "['upostag=noun', 'dependency=object']"
+ default: "[]"
+ work_author:
+ type: string
+ description: Name of the person who wrote the base text for the exercise.
+ example: C. Iulius Caesar
+ default: ""
+ work_title:
+ type: string
+ description: Title of the base text for the exercise.
+ example: Noctes Atticae
+ default: ""
+ required:
+ - instructions
+ - search_values
+ ExerciseForm:
+ x-body-name: exercise_data
+ type: object
+ allOf:
+ - $ref: '#/components/schemas/ExerciseBase'
+ - description: Additional exercise data.
+ type: object
+ properties:
+ type:
+ type: string
+ description: Type of exercise, concerning interaction and layout.
+ example: markWords
+ type_translation:
+ type: string
+ description: Localized expression of the exercise type.
+ example: Cloze
+ urn:
+ type: string
+ description: CTS URN for the text passage from which the exercise was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ required:
+ - type
+ FileType:
+ type: string
+ enum: [docx, json, pdf, xml]
+ description: File format for the requested serialization.
+ example: pdf
+ FrequencyItem:
+ type: object
+ properties:
+ count:
+ type: integer
+ description: How often the given combination of values occurred.
+ example: 1
+ phenomena:
+ type: array
+ description: Labels for the linguistic phenomena described in this frequency entry.
+ items:
+ $ref: '#/components/schemas/Phenomenon'
+ values:
+ type: array
+ description: Values for the phenomena described in this frequency entry.
+ items:
+ type: string
+ example: ""
+ GraphData:
+ type: object
+ description: Nodes, edges and metadata for a graph.
+ properties:
+ directed:
+ type: boolean
+ description: Whether edges in the returned graph are directed.
+ example: true
+ graph:
+ type: object
+ description: Additional graph data.
+ example: {}
+ links:
+ type: array
+ description: List of edges for the graph.
+ items:
+ $ref: '#/components/schemas/Link'
+ multigraph:
+ type: boolean
+        description: Whether the graph may contain multiple (parallel) edges between the same pair of nodes.
+ example: true
+ nodes:
+ type: array
+ description: List of nodes for the graph.
+ items:
+ $ref: '#/components/schemas/NodeMC'
+ required:
+ - links
+ - nodes
+ LearningResult:
+ description: Learner data for completed exercises.
+ type: object
+ x-tablename: LearningResult
+ properties:
+ actor_account_name:
+ type: string
+ description: H5P user ID, usually unique per device.
+ example: ebea3f3e-7410-4215-b34d-c1417f7c7c18
+ default: ""
+ actor_object_type:
+ type: string
+ description: Describes the kind of object that was recognized as actor.
+ example: Agent
+ default: ""
+ category_id:
+ type: string
+ description: Link to the exercise type specification.
+ example: http://h5p.org/libraries/H5P.MarkTheWords-1.9
+ default: ""
+ category_object_type:
+ type: string
+ description: Describes the kind of object that was recognized as exercise.
+ example: Activity
+ default: ""
+ choices:
+ type: string
+ description: JSON string containing a list of possible choices, each with ID and description.
+ example: "[{'id':'2','description':{'en-US':'Quintus ist bei allen in der Provinz beliebt.\n'}},{'id':'3','description':{'en-US':'Asia ist eine unbekannte Provinz.\n'}}]"
+ default: "[]"
+ completion:
+ type: boolean
+ description: Whether the exercise was fully processed or not.
+ example: true
+ correct_responses_pattern:
+ type: string
+ description: JSON string containing a list of possible solutions to the exercise, given as patterns of answers.
+ example: "['0[,]1[,]2']"
+ created_time:
+ type: number
+ format: float
+ description: When the learner data was received (POSIX timestamp).
+ example: 1234567.789
+ x-index: true
+ x-primary-key: true
+ duration:
+ type: string
+        description: How long it took a learner to complete the exercise, given as an ISO 8601 duration.
+ example: PT9.19S
+ default: "PT0S"
+ extensions:
+ type: string
+ description: JSON string containing a mapping of keys and values (usually the local content ID, i.e. a versioning mechanism).
+ example: "{'http://h5p.org/x-api/h5p-local-content-id':1}"
+ default: "{}"
+ interaction_type:
+ type: string
+ description: Exercise type.
+ example: choice
+ default: ""
+ object_definition_description:
+ type: string
+ description: Exercise content, possibly including instructions.
+ example: "Bestimme die Form von custodem im Satz: Urbs custodem non tyrannum, domus hospitem non expilatorem recepit.\n"
+ object_definition_type:
+ type: string
+ description: Type of object definition that is presented to the user.
+ example: http://adlnet.gov/expapi/activities/cmi.interaction
+ default: ""
+ object_object_type:
+ type: string
+ description: Type of object that is presented to the user.
+ example: Activity
+ default: ""
+ response:
+ type: string
+ description: Answer provided by the user, possibly as a pattern.
+ example: His in rebus[,]sociis[,]civibus[,]rei publicae
+ score_max:
+ type: integer
+ description: Maximum possible score to be achieved in this exercise.
+ example: 1
+ score_min:
+ type: integer
+        description: Minimum possible score to be achieved in this exercise.
+ example: 0
+ score_raw:
+ type: integer
+ description: Score that was actually achieved by the user in this exercise.
+ example: 1
+ score_scaled:
+ type: number
+ format: float
+ description: Relative score (between 0 and 1) that was actually achieved by the user in this exercise.
+ example: 0.8889
+ default: 0
+ success:
+ type: boolean
+ description: Whether the exercise was successfully completed or not.
+ example: true
+ verb_display:
+ type: string
+ description: Type of action that was performed by the user.
+ example: answered
+ default: ""
+ verb_id:
+ type: string
+ description: Link to the type of action that was performed by the user.
+ example: http://adlnet.gov/expapi/verbs/answered
+ default: ""
+ required:
+ - completion
+ - correct_responses_pattern
+ - created_time
+ - object_definition_description
+ - response
+ - score_max
+ - score_min
+ - score_raw
+ - success
+ Link:
+ type: object
+ properties:
+ annis_component_name:
+ type: string
+ description: Component name as given by ANNIS.
+ example: dep
+ annis_component_type:
+ type: string
+ description: Component type as given by ANNIS.
+ example: Pointing
+ source:
+ type: string
+ description: ID of the source node for the edge.
+ example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1
+ target:
+ type: string
+ description: ID of the target node for the edge.
+ example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok3
+ udep_deprel:
+ type: string
+ description: Dependency relation described by the edge.
+ example: "det"
+ NodeMC:
+ type: object
+ properties:
+ annis_node_name:
+ type: string
+ description: Node name as given by ANNIS.
+ example: "urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1"
+ annis_node_type:
+ type: string
+ description: Node type as given by ANNIS.
+ example: "node"
+ annis_tok:
+ type: string
+ description: Raw word form as given by ANNIS.
+ example: "Galliae"
+ annis_type:
+ type: string
+ description: Node type as given by ANNIS (?).
+ example: "node"
+ id:
+ type: string
+ description: Unique identifier for the node in the SALT model.
+ example: "salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok1"
+ is_oov:
+ type: boolean
+ description: Whether the raw word form is missing in a given vocabulary.
+ example: true
+ udep_lemma:
+ type: string
+ description: Lemmatized word form.
+ example: "Gallia"
+ udep_upostag:
+ type: string
+ description: Universal part of speech tag for the word form.
+ example: "PROPN"
+ udep_xpostag:
+ type: string
+ description: Language-specific part of speech tag for the word form.
+ example: "Ne"
+ udep_feats:
+ type: string
+ description: Additional morphological information.
+ example: "Case=Nom|Gender=Fem|Number=Sing"
+ solution:
+ type: string
+ description: Solution value for this node in an exercise.
+ example: ""
+ Phenomenon:
+ type: string
+ enum: [dependency, feats, lemma, upostag]
+ description: "Linguistic phenomena: syntactic dependencies, morphological features, lemmata, parts of speech."
+ example: upostag
+ Solution:
+ type: object
+ description: Correct solution for an exercise.
+ properties:
+ target:
+ $ref: '#/components/schemas/SolutionElement'
+ value:
+ $ref: '#/components/schemas/SolutionElement'
+ SolutionElement:
+ type: object
+ description: Target or value of a correct solution for an exercise.
+ properties:
+ content:
+ type: string
+ description: Content of the solution element.
+ example: unam
+ salt_id:
+ type: string
+ description: Unique identifier for the node in the SALT model.
+ example: salt:/urn:custom:latinLit:proiel.caes-gal.lat:1.1.1/doc1#sent52548tok9
+ sentence_id:
+ type: integer
+ description: Unique identifier for the sentence in a corpus.
+ example: 52548
+ token_id:
+ type: integer
+ description: Unique identifier for the token in a sentence.
+ example: 9
+ required:
+ - sentence_id
+ - token_id
+ StaticExercise:
+ type: object
+ description: Metadata for a static exercise.
+ properties:
+ solutions:
+ type: array
+ description: Solutions for the exercise.
+ items:
+ type: array
+ description: Single solution, given as tuple of correct response and its lemma.
+ items:
+ description: Correct response or corresponding lemma.
+ type: string
+ example: gaudeas
+ urn:
+ type: string
+ description: CTS URN for the text passage from which the exercise was created.
+ example: urn:cts:latinLit:phi0448.phi001.perseus-lat2:1.1.1
+ TextComplexity:
+ type: object
+ description: Mapping of various elements of text complexity to their corresponding values.
+ properties:
+ all:
+ type: number
+ format: float
+ description: Overall text complexity of the given corpus.
+ example: 42.31
+ avg_w_len:
+ type: number
+ format: float
+ description: Average length of a word in the given corpus.
+ example: 5.4
+ avg_w_per_sent:
+ type: number
+ format: float
+ description: Average number of words per sentence.
+ example: 5.4
+ lex_den:
+ type: number
+ format: float
+ minimum: 0
+ maximum: 1
+ description: Lexical density of the given corpus.
+ example: 0.43
+ n_abl_abs:
+ type: integer
+ description: Number of ablativi absoluti in the given corpus.
+ example: 1
+ n_clause:
+ type: integer
+ description: Number of clauses in the given corpus.
+ example: 1
+ n_gerund:
+ type: integer
+ description: Number of gerunds in the given corpus.
+ example: 1
+ n_inf:
+ type: integer
+ description: Number of infinitives in the given corpus.
+ example: 1
+ n_part:
+ type: integer
+ description: Number of participles in the given corpus.
+ example: 1
+ n_punct:
+ type: integer
+ description: Number of punctuation signs in the given corpus.
+ example: 1
+ n_sent:
+ type: integer
+ description: Number of sentences in the given corpus.
+ example: 1
+ n_subclause:
+ type: integer
+ description: Number of subclauses in the given corpus.
+ example: 1
+ n_types:
+ type: integer
+ description: Number of distinct word forms in the given corpus.
+ example: 1
+ n_w:
+ type: integer
+ description: Number of words in the given corpus.
+ example: 1
+ pos:
+ type: integer
+ description: Number of distinct part of speech tags in the given corpus.
+ example: 1
+ UpdateInfo:
+ description: Timestamps for updates of various resources.
+ type: object
+ x-tablename: UpdateInfo
+ properties:
+ created_time:
+ type: number
+ format: float
+ description: When the resource was created (as POSIX timestamp).
+ example: 1234567.789
+ x-index: true
+ last_modified_time:
+ type: number
+ format: float
+ description: When the resource was last modified (as POSIX timestamp).
+ example: 1234567.789
+ x-index: true
+ resource_type:
+ type: string
+ enum: [cts_data, exercise_list, file_api_clean]
+ description: Name of the resource for which update timestamps are indexed.
+ example: cts_data
+ x-primary-key: true
+ required:
+ - created_time
+ - last_modified_time
+ - resource_type
diff --git a/mc_backend/requirements.txt b/mc_backend/requirements.txt
index 83bed99..737005e 100644
--- a/mc_backend/requirements.txt
+++ b/mc_backend/requirements.txt
@@ -51,6 +51,7 @@ OpenAlchemy==1.1.0
openapi-spec-validator==0.2.8
pathspec==0.8.0
Pillow==7.1.2
+prance==0.18.2
psycopg2==2.8.5
psycopg2-binary==2.8.5
pycparser==2.20
@@ -72,6 +73,7 @@ reportlab==3.5.42
requests==2.23.0
s3transfer==0.3.3
scipy==1.4.1
+semver==2.10.1
six==1.14.0
smart-open==2.0.0
soupsieve==2.0
diff --git a/mc_backend/tests.py b/mc_backend/tests.py
index e7993e2..ee875a6 100644
--- a/mc_backend/tests.py
+++ b/mc_backend/tests.py
@@ -37,11 +37,10 @@ from mcserver.app import create_app, db, start_updater, full_init
from mcserver.app.api.exerciseAPI import map_exercise_data_to_database
from mcserver.app.models import ResourceType, FileType, ExerciseType, ExerciseData, \
NodeMC, LinkMC, GraphData, Phenomenon, CustomCorpus, AnnisResponse, Solution, DownloadableFile, Language, \
- VocabularyCorpus, TextComplexityMeasure, FrequencyAnalysis, CitationLevel, FrequencyItem, \
- TextComplexity, Dependency, PartOfSpeech, Choice, XapiStatement, ExerciseMC, CorpusMC, \
- make_solution_element_from_salt_id
+ VocabularyCorpus, TextComplexityMeasure, CitationLevel, FrequencyItem, TextComplexity, Dependency, PartOfSpeech, \
+ Choice, XapiStatement, ExerciseMC, CorpusMC, make_solution_element_from_salt_id
from mcserver.app.services import AnnotationService, CorpusService, FileService, CustomCorpusService, DatabaseService, \
- XMLservice, TextService
+ XMLservice, TextService, FrequencyService
from mcserver.config import TestingConfig, Config
from mcserver.models_auto import Corpus, Exercise, UpdateInfo, LearningResult
from mocks import Mocks, MockResponse, MockW2V, MockQuery, TestHelper
@@ -268,12 +267,12 @@ class McTestCase(unittest.TestCase):
db.session.add(ui_file)
db.session.commit()
# create a fake old file, to be deleted on the next GET request
- FileService.create_tmp_file(FileType.xml, "old")
- args: dict = dict(type=FileType.xml.value, id=Mocks.exercise.eid, solution_indices="[0]")
+ FileService.create_tmp_file(FileType.XML, "old")
+ args: dict = dict(type=FileType.XML, id=Mocks.exercise.eid, solution_indices=[0])
response: Response = Mocks.app_dict[self.class_name].client.get(TestingConfig.SERVER_URI_FILE,
query_string=args)
self.assertEqual(response.status_code, 404)
- file_path: str = os.path.join(Config.TMP_DIRECTORY, Mocks.exercise.eid + "." + FileType.xml.value)
+ file_path: str = os.path.join(Config.TMP_DIRECTORY, Mocks.exercise.eid + "." + FileType.XML)
file_content: str = ""
with open(file_path, "w+") as f:
f.write(file_content)
@@ -286,7 +285,7 @@ class McTestCase(unittest.TestCase):
# add the mapped exercise to the database
db.session.add(Mocks.exercise)
db.session.commit()
- args["type"] = FileType.pdf.value
+ args["type"] = FileType.PDF
response = Mocks.app_dict[self.class_name].client.get(TestingConfig.SERVER_URI_FILE, query_string=args)
# the PDFs are not deterministically reproducible because the creation date etc. is written into them
self.assertTrue(response.data.startswith(Mocks.exercise_pdf))
@@ -301,7 +300,7 @@ class McTestCase(unittest.TestCase):
data=data_dict)
lrs: List[LearningResult] = db.session.query(LearningResult).all()
self.assertEqual(len(lrs), 1)
- data_dict: dict = dict(file_type=FileType.xml.name, urn=Mocks.urn_custom, html_content="")
+ data_dict: dict = dict(file_type=FileType.XML, urn=Mocks.urn_custom, html_content="")
response: Response = Mocks.app_dict[self.class_name].client.post(TestingConfig.SERVER_URI_FILE,
headers=Mocks.headers_form_data,
data=data_dict)
@@ -313,11 +312,11 @@ class McTestCase(unittest.TestCase):
def test_api_frequency_get(self):
""" Requests a frequency analysis for a given URN. """
with patch.object(mcserver.app.services.corpusService.requests, "get", return_value=MockResponse(
- json.dumps([FrequencyItem(values=[], phenomena=[], count=[]).serialize()]))):
+ json.dumps([FrequencyItem(values=[], phenomena=[], count=0).to_dict()]))):
response: Response = Mocks.app_dict[self.class_name].client.get(TestingConfig.SERVER_URI_FREQUENCY,
query_string=dict(urn=Mocks.urn_custom))
- result_list: List[dict] = json.loads(response.data.decode("utf-8"))
- fa: FrequencyAnalysis = FrequencyAnalysis(json_list=result_list)
+ result_list: List[dict] = json.loads(response.get_data(as_text=True))
+ fa: List[FrequencyItem] = [FrequencyItem.from_dict(x) for x in result_list]
self.assertEqual(len(fa), 1)
def test_api_h5p_get(self):
@@ -637,13 +636,13 @@ class CsmTestCase(unittest.TestCase):
matches: List[str] = json.loads(response.get_data())
self.assertEqual(len(matches), 6)
solutions: List[Solution] = CorpusService.get_matches(Mocks.urn_custom, ['tok ->dep tok'],
- [Phenomenon.dependency])
+ [Phenomenon.DEPENDENCY])
self.assertEqual(len(solutions), 5)
solutions = CorpusService.get_matches(Mocks.urn_custom, ['upostag="VERB" ->dep tok'],
- [Phenomenon.partOfSpeech, Phenomenon.dependency])
+ [Phenomenon.UPOSTAG, Phenomenon.DEPENDENCY])
self.assertEqual(len(solutions), 5)
solutions = CorpusService.get_matches(Mocks.urn_custom, ['tok ->dep tok ->dep tok'],
- [Phenomenon.dependency, Phenomenon.partOfSpeech])
+ [Phenomenon.DEPENDENCY, Phenomenon.UPOSTAG])
self.assertEqual(len(solutions), 3)
def test_api_csm_get(self):
@@ -665,16 +664,14 @@ class CsmTestCase(unittest.TestCase):
def test_api_frequency_get(self):
""" Requests a frequency analysis for a given URN. """
- expected_fa: FrequencyAnalysis = FrequencyAnalysis()
- expected_fa.append(
- FrequencyItem(values=[Dependency.object.name], phenomena=[Phenomenon.dependency], count=1))
- expected_fa.append(
- FrequencyItem(values=[PartOfSpeech.adjective.name], phenomena=[Phenomenon.partOfSpeech], count=1))
+ expected_fa: List[FrequencyItem] = [
+ FrequencyItem(values=[Dependency.object.name], phenomena=[Phenomenon.DEPENDENCY], count=1),
+ FrequencyItem(values=[PartOfSpeech.adjective.name], phenomena=[Phenomenon.UPOSTAG], count=1)]
with patch.object(CorpusService, "get_frequency_analysis", return_value=expected_fa):
response: Response = Mocks.app_dict[self.class_name].client.get(TestingConfig.SERVER_URI_FREQUENCY,
query_string=dict(urn=Mocks.urn_custom))
- result_list: List[dict] = json.loads(response.data.decode("utf-8"))
- fa: FrequencyAnalysis = FrequencyAnalysis(json_list=result_list)
+ result_list: List[dict] = json.loads(response.get_data(as_text=True))
+ fa: List[FrequencyItem] = [FrequencyItem.from_dict(x) for x in result_list]
self.assertEqual(fa[0].values, expected_fa[0].values)
self.assertEqual(fa[1].values[0], None)
@@ -728,11 +725,11 @@ class CsmTestCase(unittest.TestCase):
Mocks.app_dict[self.class_name].client.get(TestingConfig.SERVER_URI_CSM,
query_string=dict(urn=Mocks.urn_custom))
data_dict: dict = dict(title=Mocks.exercise.urn, annotations=Mocks.exercise.conll, aqls=Mocks.aqls,
- exercise_type=ExerciseType.cloze.name, search_phenomena=[Phenomenon.partOfSpeech.name])
+ exercise_type=ExerciseType.cloze.name, search_phenomena=[Phenomenon.UPOSTAG])
first_response: Response = Mocks.app_dict[self.class_name].client.post(TestingConfig.SERVER_URI_CSM,
data=json.dumps(data_dict))
# ANNIS does not create deterministically reproducible results, so we only test for a substring
- self.assertIn(Mocks.graph_data_raw_part, first_response.data.decode("utf-8"))
+ self.assertIn(Mocks.graph_data_raw_part, first_response.get_data(as_text=True))
third_response: Response = Mocks.app_dict[self.class_name].client.post(TestingConfig.SERVER_URI_CSM,
data=data_dict)
# Response: Bad Request
@@ -781,13 +778,13 @@ class CsmTestCase(unittest.TestCase):
def test_get_frequency_analysis(self):
""" Gets a frequency analysis by calling the CSM. """
with patch.object(mcserver.app.services.corpusService.requests, "get", return_value=MockResponse(
- json.dumps([FrequencyItem(values=[], phenomena=[], count=[]).serialize()]))):
- fa: FrequencyAnalysis = CorpusService.get_frequency_analysis(urn=Mocks.urn_custom, is_csm=False)
+ json.dumps([FrequencyItem(values=[], phenomena=[], count=0).to_dict()]))):
+ fa: List[FrequencyItem] = CorpusService.get_frequency_analysis(urn=Mocks.urn_custom, is_csm=False)
self.assertEqual(len(fa), 1)
CorpusService.get_corpus(Mocks.urn_custom, True)
with patch.object(CorpusService, "get_corpus", return_value=Mocks.annis_response):
fa = CorpusService.get_frequency_analysis(Mocks.urn_custom, True)
- self.assertEqual(len(fa), 191)
+ self.assertEqual(len(fa), 163)
def test_get_graph(self):
""" Retrieves a graph from the cache or, if not there, builds it from scratch. """
@@ -818,7 +815,7 @@ class CsmTestCase(unittest.TestCase):
cs=Config.CORPUS_STORAGE_MANAGER, file_name=disk_urn)
result: dict = CorpusService.process_corpus_data(urn=Mocks.urn_custom, annotations=Mocks.annotations,
aqls=["upostag"], exercise_type=ExerciseType.cloze,
- search_phenomena=[Phenomenon.partOfSpeech])
+ search_phenomena=[Phenomenon.UPOSTAG])
gd: GraphData = AnnotationService.map_graph_data(result["graph_data_raw"])
self.assertEqual(len(gd.nodes), len(Mocks.nodes))
urn_parts: List[str] = Mocks.urn_custom.split(":")
@@ -848,6 +845,15 @@ class CommonTestCase(unittest.TestCase):
"""Finishes testing by removing the traces."""
print("{0}: {1} seconds".format(self.id(), "%.2f" % (time.time() - self.start_time)))
+ def test_add_dependency_frequencies(self):
+ """ Performs a frequency analysis for dependency annotations in a corpus. """
+ gd: GraphData = GraphData.from_dict(Mocks.graph_data.to_dict())
+ gd.links[0].udep_deprel = "safebpfw"
+ gd.links[48].udep_deprel = "fkonürwür"
+ fis: List[FrequencyItem] = []
+ FrequencyService.add_dependency_frequencies(gd, fis)
+ self.assertEqual(len(fis), 134)
+
def test_add_edges(self):
"""Adds edges to an existing graph based on a list of keys and constraints to their similarity and frequency."""
from mcserver.app.api.vectorNetworkAPI import add_edges
@@ -874,7 +880,7 @@ class CommonTestCase(unittest.TestCase):
"""Exports the exercise data to the Moodle XML format. See https://docs.moodle.org/35/en/Moodle_XML_format ."""
xml_string: str = XMLservice.create_xml_string(
ExerciseMC.from_dict(exercise_type=ExerciseType.matching.value, last_access_time=0, eid=str(uuid.uuid4())),
- [], FileType.pdf, [])
+ [], FileType.PDF, [])
self.assertEqual(xml_string, Mocks.exercise_xml)
def test_dependency_imports(self):
@@ -945,10 +951,10 @@ class CommonTestCase(unittest.TestCase):
""" Builds an HTML string from an exercise, e.g. to construct a PDF from it. """
Mocks.exercise.exercise_type = ExerciseType.matching.value
solutions: List[Solution] = [Solution.from_dict(x) for x in json.loads(Mocks.exercise.solutions)]
- result: str = FileService.get_pdf_html_string(Mocks.exercise, Mocks.annotations, FileType.pdf, solutions)
+ result: str = FileService.get_pdf_html_string(Mocks.exercise, Mocks.annotations, FileType.PDF, solutions)
self.assertEqual(result, '
:
')
Mocks.exercise.exercise_type = ExerciseType.markWords.value
- result = FileService.get_pdf_html_string(Mocks.exercise, Mocks.annotations, FileType.pdf, solutions)
+ result = FileService.get_pdf_html_string(Mocks.exercise, Mocks.annotations, FileType.PDF, solutions)
self.assertEqual(result, ':
Caesar et Galli fortes sunt.
')
Mocks.exercise.exercise_type = ExerciseType.cloze.value
@@ -1063,33 +1069,33 @@ class CommonTestCase(unittest.TestCase):
""" Saves an exercise to a DOCX file (e.g. for later download). """
file_path: str = os.path.join(Config.TMP_DIRECTORY, "make_docx_file.docx")
solutions: List[Solution] = [Solution.from_dict(x) for x in json.loads(Mocks.exercise.solutions)]
- FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.docx, solutions)
+ FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.DOCX, solutions)
self.assertEqual(os.path.getsize(file_path), 36611)
Mocks.exercise.exercise_type = ExerciseType.markWords.value
- FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.docx, solutions)
+ FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.DOCX, solutions)
self.assertEqual(os.path.getsize(file_path), 36599)
Mocks.exercise.exercise_type = ExerciseType.matching.value
- FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.docx, solutions)
+ FileService.make_docx_file(Mocks.exercise, file_path, Mocks.annotations, FileType.DOCX, solutions)
self.assertEqual(os.path.getsize(file_path), 36714)
Mocks.exercise.exercise_type = ExerciseType.cloze.value
os.remove(file_path)
def test_make_tmp_file_from_exercise(self):
""" Creates a temporary file from a given exercise, e.g. for downloading. """
- df: DownloadableFile = FileService.make_tmp_file_from_exercise(FileType.xml, Mocks.exercise, [0])
+ df: DownloadableFile = FileService.make_tmp_file_from_exercise(FileType.XML, Mocks.exercise, [0])
self.assertTrue(os.path.exists(df.file_path))
os.remove(df.file_path)
- df: DownloadableFile = FileService.make_tmp_file_from_exercise(FileType.docx, Mocks.exercise, [0])
+ df: DownloadableFile = FileService.make_tmp_file_from_exercise(FileType.DOCX, Mocks.exercise, [0])
self.assertTrue(os.path.exists(df.file_path))
os.remove(df.file_path)
def test_make_tmp_file_from_html(self):
""" Creates a temporary file from a given HTML string, e.g. for downloading. """
html: str = "test
abc"
- df: DownloadableFile = FileService.make_tmp_file_from_html(Mocks.urn_custom, FileType.pdf, html)
+ df: DownloadableFile = FileService.make_tmp_file_from_html(Mocks.urn_custom, FileType.PDF, html)
self.assertTrue(os.path.exists(df.file_path))
os.remove(df.file_path)
- df: DownloadableFile = FileService.make_tmp_file_from_html(Mocks.urn_custom, FileType.docx, html)
+ df: DownloadableFile = FileService.make_tmp_file_from_html(Mocks.urn_custom, FileType.DOCX, html)
self.assertTrue(os.path.exists(df.file_path))
os.remove(df.file_path)
diff --git a/mc_frontend/openapi/api/default.service.ts b/mc_frontend/openapi/api/default.service.ts
index 90aa61b..b746040 100644
--- a/mc_frontend/openapi/api/default.service.ts
+++ b/mc_frontend/openapi/api/default.service.ts
@@ -19,6 +19,10 @@ import { Observable } from 'rxjs';
import { AnnisResponse } from '../model/models';
import { Corpus } from '../model/models';
+import { Exercise } from '../model/models';
+import { FileType } from '../model/models';
+import { FrequencyItem } from '../model/models';
+import { StaticExercise } from '../model/models';
import { BASE_PATH, COLLECTION_FORMATS } from '../variables';
import { Configuration } from '../configuration';
@@ -48,6 +52,19 @@ export class DefaultService {
this.encoder = this.configuration.encoder || new CustomHttpParameterCodec();
}
+ /**
+ * @param consumes string[] mime-types
+ * @return true: consumes contains 'multipart/form-data', false: otherwise
+ */
+ private canConsumeForm(consumes: string[]): boolean {
+ const form = 'multipart/form-data';
+ for (const consume of consumes) {
+ if (form === consume) {
+ return true;
+ }
+ }
+ return false;
+ }
private addToHttpParams(httpParams: HttpParams, value: any, key?: string): HttpParams {
@@ -386,14 +403,274 @@ export class DefaultService {
);
}
+ /**
+ * Provides metadata for all available exercises.
+ * @param lang ISO 639-1 Language Code for the localization of exercise content.
+ * @param frequencyUpperBound Upper bound for reference vocabulary frequency.
+ * @param lastUpdateTime Time (in milliseconds) of the last update.
+ * @param vocabulary Identifier for a reference vocabulary.
+ * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body.
+ * @param reportProgress flag to report request and response progress.
+ */
+ public mcserverAppApiExerciseListAPIGet(lang: string, frequencyUpperBound?: number, lastUpdateTime?: number, vocabulary?: 'agldt' | 'bws' | 'proiel' | 'viva', observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<Exercise>;
+ public mcserverAppApiExerciseListAPIGet(lang: string, frequencyUpperBound?: number, lastUpdateTime?: number, vocabulary?: 'agldt' | 'bws' | 'proiel' | 'viva', observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpResponse<Exercise>>;
+ public mcserverAppApiExerciseListAPIGet(lang: string, frequencyUpperBound?: number, lastUpdateTime?: number, vocabulary?: 'agldt' | 'bws' | 'proiel' | 'viva', observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpEvent<Exercise>>;
+ public mcserverAppApiExerciseListAPIGet(lang: string, frequencyUpperBound?: number, lastUpdateTime?: number, vocabulary?: 'agldt' | 'bws' | 'proiel' | 'viva', observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json'}): Observable<any> {
+ if (lang === null || lang === undefined) {
+ throw new Error('Required parameter lang was null or undefined when calling mcserverAppApiExerciseListAPIGet.');
+ }
+
+ let queryParameters = new HttpParams({encoder: this.encoder});
+ if (lang !== undefined && lang !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ lang, 'lang');
+ }
+ if (frequencyUpperBound !== undefined && frequencyUpperBound !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ frequencyUpperBound, 'frequency_upper_bound');
+ }
+ if (lastUpdateTime !== undefined && lastUpdateTime !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ lastUpdateTime, 'last_update_time');
+ }
+ if (vocabulary !== undefined && vocabulary !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ vocabulary, 'vocabulary');
+ }
+
+ let headers = this.defaultHeaders;
+
+ let httpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept;
+ if (httpHeaderAcceptSelected === undefined) {
+ // to determine the Accept header
+ const httpHeaderAccepts: string[] = [
+ 'application/json'
+ ];
+ httpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts);
+ }
+ if (httpHeaderAcceptSelected !== undefined) {
+ headers = headers.set('Accept', httpHeaderAcceptSelected);
+ }
+
+
+ let responseType: 'text' | 'json' = 'json';
+ if(httpHeaderAcceptSelected && httpHeaderAcceptSelected.startsWith('text')) {
+ responseType = 'text';
+ }
+
+ return this.httpClient.get<Exercise>(`${this.configuration.basePath}/exerciseList`,
+ {
+ params: queryParameters,
+ responseType: responseType,
+ withCredentials: this.configuration.withCredentials,
+ headers: headers,
+ observe: observe,
+ reportProgress: reportProgress
+ }
+ );
+ }
+
+ /**
+ * Provides the URL to download a specific file.
+ * @param id Unique identifier (UUID) for an exercise.
+ * @param type File format for the requested download.
+ * @param solutionIndices Indices for the solutions that should be included in the download.
+ * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body.
+ * @param reportProgress flag to report request and response progress.
+ */
+ public mcserverAppApiFileAPIGet(id: string, type: FileType, solutionIndices?: Array<number>, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<string>;
+ public mcserverAppApiFileAPIGet(id: string, type: FileType, solutionIndices?: Array<number>, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpResponse<string>>;
+ public mcserverAppApiFileAPIGet(id: string, type: FileType, solutionIndices?: Array<number>, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpEvent<string>>;
+ public mcserverAppApiFileAPIGet(id: string, type: FileType, solutionIndices?: Array<number>, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json'}): Observable<any> {
+ if (id === null || id === undefined) {
+ throw new Error('Required parameter id was null or undefined when calling mcserverAppApiFileAPIGet.');
+ }
+ if (type === null || type === undefined) {
+ throw new Error('Required parameter type was null or undefined when calling mcserverAppApiFileAPIGet.');
+ }
+
+ let queryParameters = new HttpParams({encoder: this.encoder});
+ if (id !== undefined && id !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ id, 'id');
+ }
+ if (type !== undefined && type !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ type, 'type');
+ }
+ if (solutionIndices) {
+ solutionIndices.forEach((element) => {
+ queryParameters = this.addToHttpParams(queryParameters,
+ element, 'solution_indices');
+ })
+ }
+
+ let headers = this.defaultHeaders;
+
+ let httpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept;
+ if (httpHeaderAcceptSelected === undefined) {
+ // to determine the Accept header
+ const httpHeaderAccepts: string[] = [
+ 'application/json'
+ ];
+ httpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts);
+ }
+ if (httpHeaderAcceptSelected !== undefined) {
+ headers = headers.set('Accept', httpHeaderAcceptSelected);
+ }
+
+
+ let responseType: 'text' | 'json' = 'json';
+ if(httpHeaderAcceptSelected && httpHeaderAcceptSelected.startsWith('text')) {
+ responseType = 'text';
+ }
+
+ return this.httpClient.get<string>(`${this.configuration.basePath}/file`,
+ {
+ params: queryParameters,
+ responseType: responseType,
+ withCredentials: this.configuration.withCredentials,
+ headers: headers,
+ observe: observe,
+ reportProgress: reportProgress
+ }
+ );
+ }
+
+ /**
+ * Serializes and persists learning results or HTML content for later access.
+ * @param fileType
+ * @param htmlContent HTML content to be serialized.
+ * @param learningResult Serialized XAPI results for an interactive exercise.
+ * @param urn CTS URN for the text passage from which the HTML content was created.
+ * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body.
+ * @param reportProgress flag to report request and response progress.
+ */
+ public mcserverAppApiFileAPIPost(fileType?: FileType, htmlContent?: string, learningResult?: string, urn?: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<string>;
+ public mcserverAppApiFileAPIPost(fileType?: FileType, htmlContent?: string, learningResult?: string, urn?: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpResponse<string>>;
+ public mcserverAppApiFileAPIPost(fileType?: FileType, htmlContent?: string, learningResult?: string, urn?: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpEvent<string>>;
+ public mcserverAppApiFileAPIPost(fileType?: FileType, htmlContent?: string, learningResult?: string, urn?: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json'}): Observable<any> {
+
+ let headers = this.defaultHeaders;
+
+ let httpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept;
+ if (httpHeaderAcceptSelected === undefined) {
+ // to determine the Accept header
+ const httpHeaderAccepts: string[] = [
+ 'application/json'
+ ];
+ httpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts);
+ }
+ if (httpHeaderAcceptSelected !== undefined) {
+ headers = headers.set('Accept', httpHeaderAcceptSelected);
+ }
+
+ // to determine the Content-Type header
+ const consumes: string[] = [
+ 'application/x-www-form-urlencoded'
+ ];
+
+ const canConsumeForm = this.canConsumeForm(consumes);
+
+ let formParams: { append(param: string, value: any): any; };
+ let useForm = false;
+ let convertFormParamsToString = false;
+ if (useForm) {
+ formParams = new FormData();
+ } else {
+ formParams = new HttpParams({encoder: this.encoder});
+ }
+
+ if (fileType !== undefined) {
+ formParams = formParams.append('file_type', fileType) as any || formParams;
+ }
+ if (htmlContent !== undefined) {
+ formParams = formParams.append('html_content', htmlContent) as any || formParams;
+ }
+ if (learningResult !== undefined) {
+ formParams = formParams.append('learning_result', learningResult) as any || formParams;
+ }
+ if (urn !== undefined) {
+ formParams = formParams.append('urn', urn) as any || formParams;
+ }
+
+ let responseType: 'text' | 'json' = 'json';
+ if(httpHeaderAcceptSelected && httpHeaderAcceptSelected.startsWith('text')) {
+ responseType = 'text';
+ }
+
+ return this.httpClient.post<string>(`${this.configuration.basePath}/file`,
+ convertFormParamsToString ? formParams.toString() : formParams,
+ {
+ responseType: responseType,
+ withCredentials: this.configuration.withCredentials,
+ headers: headers,
+ observe: observe,
+ reportProgress: reportProgress
+ }
+ );
+ }
+
+ /**
+ * Returns results for a frequency query from ANNIS for a given CTS URN.
+ * @param urn CTS URN for referencing the corpus.
+ * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body.
+ * @param reportProgress flag to report request and response progress.
+ */
+ public mcserverAppApiFrequencyAPIGet(urn: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<Array<FrequencyItem>>;
+ public mcserverAppApiFrequencyAPIGet(urn: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpResponse<Array<FrequencyItem>>>;
+ public mcserverAppApiFrequencyAPIGet(urn: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable<HttpEvent<Array<FrequencyItem>>>;
+ public mcserverAppApiFrequencyAPIGet(urn: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json'}): Observable<any> {
+ if (urn === null || urn === undefined) {
+ throw new Error('Required parameter urn was null or undefined when calling mcserverAppApiFrequencyAPIGet.');
+ }
+
+ let queryParameters = new HttpParams({encoder: this.encoder});
+ if (urn !== undefined && urn !== null) {
+ queryParameters = this.addToHttpParams(queryParameters,
+ urn, 'urn');
+ }
+
+ let headers = this.defaultHeaders;
+
+ let httpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept;
+ if (httpHeaderAcceptSelected === undefined) {
+ // to determine the Accept header
+ const httpHeaderAccepts: string[] = [
+ 'application/json'
+ ];
+ httpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts);
+ }
+ if (httpHeaderAcceptSelected !== undefined) {
+ headers = headers.set('Accept', httpHeaderAcceptSelected);
+ }
+
+
+ let responseType: 'text' | 'json' = 'json';
+ if(httpHeaderAcceptSelected && httpHeaderAcceptSelected.startsWith('text')) {
+ responseType = 'text';
+ }
+
+ return this.httpClient.get<Array<FrequencyItem>>(`${this.configuration.basePath}/frequency`,
+ {
+ params: queryParameters,
+ responseType: responseType,
+ withCredentials: this.configuration.withCredentials,
+ headers: headers,
+ observe: observe,
+ reportProgress: reportProgress
+ }
+ );
+ }
+
/**
* Returns metadata for static exercises.
* @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body.
* @param reportProgress flag to report request and response progress.
*/
- public mcserverAppApiStaticExercisesAPIGet(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json'}): Observable