diff --git a/openedx/core/djangolib/markup.py b/openedx/core/djangolib/markup.py
index c932fb91b974..b69e10b1f91a 100644
--- a/openedx/core/djangolib/markup.py
+++ b/openedx/core/djangolib/markup.py
@@ -2,7 +2,6 @@
 Utilities for use in Mako markup.
 """
 
-
 import markupsafe
 import nh3
 from lxml.html.clean import Cleaner
@@ -11,10 +10,10 @@
 # Text() can be used to declare a string as plain text, as HTML() is used
 # for HTML. It simply wraps markupsafe's escape, which will HTML-escape if
 # it isn't already escaped.
-Text = markupsafe.escape                        # pylint: disable=invalid-name
+Text = markupsafe.escape  # pylint: disable=invalid-name
 
 
-def HTML(html):                                 # pylint: disable=invalid-name
+def HTML(html):  # pylint: disable=invalid-name
     """
     Mark a string as already HTML, so that it won't be escaped before output.
@@ -53,7 +52,7 @@ def strip_all_tags_but_br(string_to_strip):
         string_to_strip = ""
 
     string_to_strip = decode.utf8(string_to_strip)
-    string_to_strip = nh3.clean(string_to_strip, tags={'br'})
+    string_to_strip = nh3.clean(string_to_strip, tags={"br"})
 
     return HTML(string_to_strip)
diff --git a/openedx/core/djangolib/tests/test_markup.py b/openedx/core/djangolib/tests/test_markup.py
index a7a66902af2f..65c47972a2fc 100644
--- a/openedx/core/djangolib/tests/test_markup.py
+++ b/openedx/core/djangolib/tests/test_markup.py
@@ -2,7 +2,6 @@
 Tests for openedx.core.djangolib.markup
 """
 
-
 import unittest
 
 import ddt
@@ -22,7 +21,7 @@ class FormatHtmlTest(unittest.TestCase):
         ("hello", "hello"),
         ("<hello>", "&lt;hello&gt;"),
         ("It's cool", "It&#39;s cool"),
-        ('"cool," she said.', '&#34;cool,&#34; she said.'),
+        ('"cool," she said.', "&#34;cool,&#34; she said."),
         ("Stop & Shop", "Stop &amp; Shop"),
         ("<a>нтмℓ-єѕ¢αρє∂</a>", "&lt;a&gt;нтмℓ-єѕ¢αρє∂&lt;/a&gt;"),
     )
@@ -59,18 +58,18 @@ def test_mako(self):
             %>
             ${Text(_(u"A & {BC}")).format(BC=HTML("B & C"))}
             """,
-            default_filters=['decode.utf8', 'h'],
+            default_filters=["decode.utf8", "h"],
         )
         out = template.render()
-        assert out.strip() == 'A &amp; B & C'
+        assert out.strip() == "A &amp; B & C"
 
     def test_ungettext(self):
         for i in [1, 2]:
             out = Text(ngettext("1 & {}", "2 & {}", i)).format(HTML("<>"))
-            assert out == f'{i} &amp; <>'
+            assert out == f"{i} &amp; <>"
 
     def test_strip_all_tags_but_br_filter(self):
-        """ Verify filter removes every tags except br """
+        """Verify filter removes every tags except br"""
        template = Template(
             """
             <%page expression_filter="h"/>
             <%!
             from openedx.core.djangolib.markup import strip_all_tags_but_br
             %>
@@ -82,20 +81,20 @@ def test_strip_all_tags_but_br_filter(self):
         )
         rendered_template = template.render()
 
-        assert '<br>
' in rendered_template - assert ' """ - human_name = _('Custom Evaluated Script') - tags = ['customresponse'] + human_name = _("Custom Evaluated Script") + tags = ["customresponse"] - allowed_inputfields = ['textline', 'textbox', 'crystallography', - 'chemicalequationinput', 'vsepr_input', - 'drag_and_drop_input', 'designprotein2dinput', - 'editageneinput', 'annotationinput', - 'jsinput', 'formulaequationinput'] + allowed_inputfields = [ + "textline", + "textbox", + "crystallography", + "chemicalequationinput", + "vsepr_input", + "drag_and_drop_input", + "designprotein2dinput", + "editageneinput", + "annotationinput", + "jsinput", + "formulaequationinput", + ] code = None expect = None @@ -2098,23 +2087,23 @@ def setup_response(self): # if has an "expect" (or "answer") attribute then save # that - self.expect = contextualize_text(xml.get('expect') or xml.get('answer'), self.context) + self.expect = contextualize_text(xml.get("expect") or xml.get("answer"), self.context) - log.debug('answer_ids=%s', self.answer_ids) + log.debug("answer_ids=%s", self.answer_ids) # the ... stanza should be local to the current . # So try looking there first. self.code = None answer = None try: - answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0] + answer = xml.xpath("//*[@id=$id]//answer", id=xml.get("id"))[0] except IndexError: # print "xml = ",etree.tostring(xml,pretty_print=True) # if we have a "cfn" attribute then look for the function specified by cfn, in # the problem context ie the comparison function is defined in the # stanza instead - cfn = xml.get('cfn') + cfn = xml.get("cfn") if cfn: log.debug("cfn = %s", cfn) @@ -2126,42 +2115,37 @@ def setup_response(self): def make_check_function(script_code, cfn): def check_function(expect, ans, **kwargs): extra_args = "".join(", {0}={0}".format(k) for k in kwargs) - code = ( - script_code + "\n" + - "cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args) - ) + code = script_code + "\n" + "cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args) globals_dict = { - 'expect': expect, - 'ans': ans, + "expect": expect, + "ans": ans, } globals_dict.update(kwargs) safe_exec.safe_exec( code, globals_dict, - python_path=self.context['python_path'], - extra_files=self.context['extra_files'], - limit_overrides_context=get_course_id_from_capa_block( - self.capa_block - ), + python_path=self.context["python_path"], + extra_files=self.context["extra_files"], + limit_overrides_context=get_course_id_from_capa_block(self.capa_block), slug=self.id, - random_seed=self.context['seed'], + random_seed=self.context["seed"], unsafely=self.capa_system.can_execute_unsafe_code(), ) - return globals_dict['cfn_return'] + return globals_dict["cfn_return"] + return check_function - self.code = make_check_function(self.context['script_code'], cfn) + self.code = make_check_function(self.context["script_code"], cfn) if not self.code: if answer is None: - log.error("[courseware.capa.responsetypes.customresponse] missing" - " code checking script! id=%s", self.id) - self.code = '' + log.error("[courseware.capa.responsetypes.customresponse] missing code checking script! id=%s", self.id) + self.code = "" else: - answer_src = answer.get('src') + answer_src = answer.get("src") if answer_src is not None: # TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist. 
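For context on the cfn hook that make_check_function wires up above: the course author supplies a comparison function taking (expect, ans) plus optional keyword arguments, and the wrapper runs the problem's script plus a call to that function under safe_exec. A minimal sketch of such a function — the name, messages, and grading rule here are illustrative, not part of this change:

    def check_func(expect, ans):
        # The returned dict is interpreted by execute_check_function:
        # "ok" may be a boolean, or any string containing "partial"
        # for partial credit; "grade_decimal" optionally scales points.
        if ans.strip() == expect:
            return {"ok": True, "msg": "Correct!"}
        if ans.strip():
            return {"ok": "partial", "msg": "Close.", "grade_decimal": 0.5}
        return {"ok": False, "msg": "Try again."}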
- self.code = self.capa_system.filesystem.open('src/' + answer_src).read() + self.code = self.capa_system.filesystem.open("src/" + answer_src).read() else: self.code = answer.text @@ -2172,7 +2156,7 @@ def get_score(self, student_answers): """ _ = self.capa_system.i18n.gettext - log.debug('%s: student_answers=%s', str(self), student_answers) + log.debug("%s: student_answers=%s", str(self), student_answers) # ordered list of answer id's # sort the responses on the bases of the problem's position number @@ -2188,83 +2172,81 @@ def get_score(self, student_answers): student_answers=student_answers, ), idset=idset, - err=err + err=err, ) log.error( "[courseware.capa.responsetypes.customresponse] error getting" " student answer from %s" "\n idset = %s, error = %s", - student_answers, idset, err + student_answers, + idset, + err, ) raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from # global variable in context which holds the Presentation MathML from dynamic math input # ordered list of dynamath responses - dynamath = [student_answers.get(k + '_dynamath', None) for k in idset] + dynamath = [student_answers.get(k + "_dynamath", None) for k in idset] # if there is only one box, and it's empty, then don't evaluate if len(idset) == 1 and not submission[0]: # default to no error message on empty answer (to be consistent with other # responsetypes) but allow author to still have the old behavior by setting # empty_answer_err attribute - msg = (HTML('{0}').format(_('No answer entered!')) - if self.xml.get('empty_answer_err') else '') - return CorrectMap(idset[0], 'incorrect', msg=msg) + msg = ( + HTML('{0}').format(_("No answer entered!")) + if self.xml.get("empty_answer_err") + else "" + ) + return CorrectMap(idset[0], "incorrect", msg=msg) # NOTE: correct = 'unknown' could be dangerous. 
Inputtypes such as textline are # not expecting 'unknown's - correct = ['unknown'] * len(idset) - messages = [''] * len(idset) + correct = ["unknown"] * len(idset) + messages = [""] * len(idset) overall_message = "" # put these in the context of the check function evaluator # note that this doesn't help the "cfn" version - only the exec version - self.context.update({ - # my ID - 'response_id': self.id, - - # expected answer (if given as attribute) - 'expect': self.expect, - - # ordered list of student answers from entry boxes in our subtree - 'submission': submission, - - # ordered list of ID's of all entry boxes in our subtree - 'idset': idset, - - # ordered list of all javascript inputs in our subtree - 'dynamath': dynamath, - - # dict of student's responses, with keys being entry box IDs - 'answers': student_answers, - - # the list to be filled in by the check function - 'correct': correct, - - # the list of messages to be filled in by the check function - 'messages': messages, - - # a message that applies to the entire response - # instead of a particular input - 'overall_message': overall_message, - - # any options to be passed to the cfn - 'options': self.xml.get('options'), - 'testdat': 'hello world', - }) + self.context.update( + { + # my ID + "response_id": self.id, + # expected answer (if given as attribute) + "expect": self.expect, + # ordered list of student answers from entry boxes in our subtree + "submission": submission, + # ordered list of ID's of all entry boxes in our subtree + "idset": idset, + # ordered list of all javascript inputs in our subtree + "dynamath": dynamath, + # dict of student's responses, with keys being entry box IDs + "answers": student_answers, + # the list to be filled in by the check function + "correct": correct, + # the list of messages to be filled in by the check function + "messages": messages, + # a message that applies to the entire response + # instead of a particular input + "overall_message": overall_message, + # any options to be passed to the cfn + "options": self.xml.get("options"), + "testdat": "hello world", + } + ) # Pass DEBUG to the check function. 
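The keys assembled in the context.update call above are exactly the names an inline answer-checking script sees when it runs. A sketch of such a script, using only the documented names — the grading rule itself is invented for illustration:

    # Runs via safe_exec with self.context as its globals, so writes to
    # these names are read back by get_score after the check completes.
    for i, ans in enumerate(submission):
        if ans == expect:
            correct[i] = "correct"
        else:
            correct[i] = "incorrect"
            messages[i] = "Expected {}".format(expect)
    overall_message = "Checked {} input(s)".format(len(idset))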
- self.context['debug'] = self.capa_system.DEBUG + self.context["debug"] = self.capa_system.DEBUG # Run the check function self.execute_check_function(idset, submission) # build map giving "correct"ness of the answer(s) - correct = self.context['correct'] - messages = self.context['messages'] - overall_message = self.clean_message_html(self.context['overall_message']) - grade_decimals = self.context.get('grade_decimals') + correct = self.context["correct"] + messages = self.context["messages"] + overall_message = self.clean_message_html(self.context["overall_message"]) + grade_decimals = self.context.get("grade_decimals") correct_map = CorrectMap() correct_map.set_overall_message(overall_message) @@ -2274,17 +2256,18 @@ def get_score(self, student_answers): if grade_decimals: npoints = max_points * grade_decimals[k] else: - if correct[k] == 'correct': + if correct[k] == "correct": npoints = max_points - elif correct[k] == 'partially-correct': + elif correct[k] == "partially-correct": npoints = max_points * self.default_pc else: npoints = 0 - correct_map.set(idset[k], correct[k], msg=messages[k], - npoints=npoints) + correct_map.set(idset[k], correct[k], msg=messages[k], npoints=npoints) return correct_map - def execute_check_function(self, idset, submission): # lint-amnesty, pylint: disable=missing-function-docstring, too-many-statements + def execute_check_function( + self, idset, submission + ): # lint-amnesty, pylint: disable=missing-function-docstring, too-many-statements # exec the check function if isinstance(self.code, str): # lint-amnesty, pylint: disable=too-many-nested-blocks try: @@ -2292,13 +2275,11 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di self.code, self.context, cache=self.capa_system.cache, - python_path=self.context['python_path'], - extra_files=self.context['extra_files'], - limit_overrides_context=get_course_id_from_capa_block( - self.capa_block - ), + python_path=self.context["python_path"], + extra_files=self.context["extra_files"], + limit_overrides_context=get_course_id_from_capa_block(self.capa_block), slug=self.id, - random_seed=self.context['seed'], + random_seed=self.context["seed"], unsafely=self.capa_system.can_execute_unsafe_code(), ) except Exception as err: # pylint: disable=broad-except @@ -2317,10 +2298,7 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di ret = tutor_cfn(self.expect, answer_given, **kwargs) except Exception as err: # pylint: disable=broad-except self._handle_exec_exception(err) - log.debug( - "[courseware.capa.responsetypes.customresponse.get_score] ret = %s", - ret - ) + log.debug("[courseware.capa.responsetypes.customresponse.get_score] ret = %s", ret) if isinstance(ret, dict): # One kind of dictionary the check function can return has the # form {'ok': BOOLEAN or STRING, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)} @@ -2328,46 +2306,46 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di # the score the student receives on the response. # If there are multiple inputs, they all get marked # to the same correct/incorrect value - if 'ok' in ret: + if "ok" in ret: # Returning any falsy value or the "false" string for "ok" gives incorrect. # Returning any string that includes "partial" for "ok" gives partial credit. 
# Returning any other truthy value for "ok" gives correct - ok_val = str(ret['ok']).lower().strip() if bool(ret['ok']) else 'false' + ok_val = str(ret["ok"]).lower().strip() if bool(ret["ok"]) else "false" - if ok_val == 'false': - correct = 'incorrect' - elif 'partial' in ok_val: - correct = 'partially-correct' + if ok_val == "false": + correct = "incorrect" + elif "partial" in ok_val: + correct = "partially-correct" else: - correct = 'correct' - correct = [correct] * len(idset) # All inputs share the same mark. + correct = "correct" + correct = [correct] * len(idset) # All inputs share the same mark. # old version, no partial credit: # correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset) - msg = ret.get('msg', None) + msg = ret.get("msg", None) msg = self.clean_message_html(msg) # If there is only one input, apply the message to that input # Otherwise, apply the message to the whole problem if len(idset) > 1: - self.context['overall_message'] = msg + self.context["overall_message"] = msg else: - self.context['messages'][0] = msg + self.context["messages"][0] = msg - if 'grade_decimal' in ret: - decimal = float(ret['grade_decimal']) + if "grade_decimal" in ret: + decimal = float(ret["grade_decimal"]) else: - if correct[0] == 'correct': + if correct[0] == "correct": decimal = 1.0 - elif correct[0] == 'partially-correct': + elif correct[0] == "partially-correct": decimal = self.default_pc else: decimal = 0.0 grade_decimals = [decimal] * len(idset) - self.context['grade_decimals'] = grade_decimals + self.context["grade_decimals"] = grade_decimals # Another kind of dictionary the check function can return has # the form: @@ -2387,9 +2365,9 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di # This allows the function to return an 'overall message' # that applies to the entire problem, as well as correct/incorrect # status, scaled grades, and messages for individual inputs - elif 'input_list' in ret: - overall_message = ret.get('overall_message', '') - input_list = ret['input_list'] + elif "input_list" in ret: + overall_message = ret.get("overall_message", "") + input_list = ret["input_list"] correct = [] messages = [] @@ -2400,43 +2378,40 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di # Returning any other truthy value for "ok" gives correct for input_dict in input_list: - if str(input_dict['ok']).lower().strip() == "false" or not input_dict['ok']: - correct.append('incorrect') - elif 'partial' in str(input_dict['ok']).lower().strip(): - correct.append('partially-correct') + if str(input_dict["ok"]).lower().strip() == "false" or not input_dict["ok"]: + correct.append("incorrect") + elif "partial" in str(input_dict["ok"]).lower().strip(): + correct.append("partially-correct") else: - correct.append('correct') + correct.append("correct") # old version, no partial credit # correct.append('correct' # if input_dict['ok'] else 'incorrect') - msg = (self.clean_message_html(input_dict['msg']) - if 'msg' in input_dict else None) + msg = self.clean_message_html(input_dict["msg"]) if "msg" in input_dict else None messages.append(msg) - if 'grade_decimal' in input_dict: - decimal = input_dict['grade_decimal'] + if "grade_decimal" in input_dict: + decimal = input_dict["grade_decimal"] else: - if str(input_dict['ok']).lower().strip() == 'true': + if str(input_dict["ok"]).lower().strip() == "true": decimal = 1.0 - elif 'partial' in str(input_dict['ok']).lower().strip(): + elif "partial" in str(input_dict["ok"]).lower().strip(): 
decimal = self.default_pc else: decimal = 0.0 grade_decimals.append(decimal) - self.context['messages'] = messages - self.context['overall_message'] = overall_message - self.context['grade_decimals'] = grade_decimals + self.context["messages"] = messages + self.context["overall_message"] = overall_message + self.context["grade_decimals"] = grade_decimals # Otherwise, we do not recognize the dictionary # Raise an exception else: log.error(traceback.format_exc()) _ = self.capa_system.i18n.gettext - raise ResponseError( - _("CustomResponse: check function returned an invalid dictionary!") - ) + raise ResponseError(_("CustomResponse: check function returned an invalid dictionary!")) else: @@ -2445,17 +2420,17 @@ def execute_check_function(self, idset, submission): # lint-amnesty, pylint: di # Returning any other truthy value for "ok" gives correct if str(ret).lower().strip() == "false" or not bool(ret): - correct = 'incorrect' - elif 'partial' in str(ret).lower().strip(): - correct = 'partially-correct' + correct = "incorrect" + elif "partial" in str(ret).lower().strip(): + correct = "partially-correct" else: - correct = 'correct' + correct = "correct" correct = [correct] * len(idset) # old version, no partial credit: # correct = ['correct' if ret else 'incorrect'] * len(idset) - self.context['correct'] = correct + self.context["correct"] = correct def clean_message_html(self, msg): # lint-amnesty, pylint: disable=missing-function-docstring @@ -2466,19 +2441,19 @@ def clean_message_html(self, msg): # lint-amnesty, pylint: disable=missing-func # When we parse *msg* using etree, there needs to be a root # element, so we wrap the *msg* text in tags - msg = HTML('{msg}').format(msg=HTML(msg)) + msg = HTML("{msg}").format(msg=HTML(msg)) # Replace < characters - msg = msg.replace('<', '<') + msg = msg.replace("<", "<") # Use etree to prettify the HTML - msg = etree.tostring(fromstring_bs(msg), pretty_print=True).decode('utf-8') + msg = etree.tostring(fromstring_bs(msg), pretty_print=True).decode("utf-8") - msg = msg.replace(' ', '') + msg = msg.replace(" ", "") # Remove the tags we introduced earlier, so we're # left with just the prettified message markup - msg = re.sub('(?ms)(.*)', '\\1', msg) + msg = re.sub("(?ms)(.*)", "\\1", msg) # Strip leading and trailing whitespace return msg.strip() @@ -2512,14 +2487,15 @@ def _handle_exec_exception(self, err): """ # Log the error if we are debugging - msg = 'Error occurred while evaluating CustomResponse' + msg = "Error occurred while evaluating CustomResponse" log.warning(msg, exc_info=True) # Notify student with a student input error _, _, traceback_obj = sys.exc_info() raise ResponseError(str(err), traceback_obj) -#----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- @registry.register @@ -2528,31 +2504,33 @@ class SymbolicResponse(CustomResponse): Symbolic math response checking, using symmath library. 
""" - human_name = _('Symbolic Math Input') - tags = ['symbolicresponse'] + human_name = _("Symbolic Math Input") + tags = ["symbolicresponse"] max_inputfields = 1 def setup_response(self): # Symbolic response always uses symmath_check() # If the XML did not specify this, then set it now # Otherwise, we get an error from the superclass - self.xml.set('cfn', 'symmath_check') + self.xml.set("cfn", "symmath_check") # Let CustomResponse do its setup super(SymbolicResponse, self).setup_response() # lint-amnesty, pylint: disable=super-with-arguments def execute_check_function(self, idset, submission): from symmath import symmath_check + try: # Since we have limited max_inputfields to 1, # we can assume that there is only one submission answer_given = submission[0] ret = symmath_check( - self.expect, answer_given, - dynamath=self.context.get('dynamath'), - options=self.context.get('options'), - debug=self.context.get('debug'), + self.expect, + answer_given, + dynamath=self.context.get("dynamath"), + options=self.context.get("options"), + debug=self.context.get("debug"), ) except Exception as err: log.error("oops in SymbolicResponse (cfn) error %s", err) @@ -2563,10 +2541,11 @@ def execute_check_function(self, idset, submission): error_msg=err, ) raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from - self.context['messages'][0] = self.clean_message_html(ret['msg']) - self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset) + self.context["messages"][0] = self.clean_message_html(ret["msg"]) + self.context["correct"] = ["correct" if ret["ok"] else "incorrect"] * len(idset) + -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- ## ScoreMessage named tuple ## ## valid: Flag indicating valid score_msg format (Boolean) @@ -2574,7 +2553,7 @@ def execute_check_function(self, idset, submission): ## score: Points to be assigned (numeric, can be float) ## msg: Message from grader to display to student (string) -ScoreMessage = namedtuple('ScoreMessage', ['valid', 'correct', 'points', 'msg']) +ScoreMessage = namedtuple("ScoreMessage", ["valid", "correct", "points", "msg"]) @registry.register @@ -2597,9 +2576,9 @@ class CodeResponse(LoncapaResponse): """ - human_name = _('Code Input') - tags = ['coderesponse'] - allowed_inputfields = ['textbox', 'filesubmission', 'matlabinput'] + human_name = _("Code Input") + tags = ["coderesponse"] + allowed_inputfields = ["textbox", "filesubmission", "matlabinput"] max_inputfields = 1 payload = None initial_display = None @@ -2615,19 +2594,19 @@ def setup_response(self): """ xml = self.xml # TODO: XML can override external resource (grader/queue) URL - self.url = xml.get('url', None) + self.url = xml.get("url", None) # We do not support xqueue within Studio. if self.capa_system.xqueue is not None: default_queuename = self.capa_system.xqueue.default_queuename else: default_queuename = None - self.queue_name = xml.get('queuename', default_queuename) + self.queue_name = xml.get("queuename", default_queuename) # VS[compat]: # Check if XML uses the ExternalResponse format or the generic # CodeResponse format - codeparam = self.xml.find('codeparam') + codeparam = self.xml.find("codeparam") assert codeparam is not None, "Unsupported old format! 
without " self._parse_coderesponse_xml(codeparam) @@ -2638,24 +2617,22 @@ def _parse_coderesponse_xml(self, codeparam): self.answer (an answer to display to the student in the LMS) self.payload """ - grader_payload = codeparam.find('grader_payload') - grader_payload = grader_payload.text if grader_payload is not None else '' + grader_payload = codeparam.find("grader_payload") + grader_payload = grader_payload.text if grader_payload is not None else "" self.payload = { - 'grader_payload': grader_payload, + "grader_payload": grader_payload, } # matlab api key can be defined in course settings. if so, add it to the grader payload - api_key = getattr(self.capa_system, 'matlab_api_key', None) - if api_key and self.xml.find('matlabinput') is not None: - self.payload['token'] = api_key - self.payload['endpoint_version'] = "2" - self.payload['requestor_id'] = self.capa_system.anonymous_student_id - - self.initial_display = find_with_default( - codeparam, 'initial_display', '') + api_key = getattr(self.capa_system, "matlab_api_key", None) + if api_key and self.xml.find("matlabinput") is not None: + self.payload["token"] = api_key + self.payload["endpoint_version"] = "2" + self.payload["requestor_id"] = self.capa_system.anonymous_student_id + + self.initial_display = find_with_default(codeparam, "initial_display", "") _ = self.capa_system.i18n.gettext - self.answer = find_with_default(codeparam, 'answer_display', - _('No answer provided.')) + self.answer = find_with_default(codeparam, "answer_display", _("No answer provided.")) def get_score(self, student_answers): _ = self.capa_system.i18n.gettext @@ -2664,21 +2641,21 @@ def get_score(self, student_answers): submission = student_answers[self.answer_id] except Exception as err: log.error( - 'Error in CodeResponse %s: cannot get student answer for %s;' - ' student_answers=%s', - err, self.answer_id, convert_files_to_filenames(student_answers) + "Error in CodeResponse %s: cannot get student answer for %s; student_answers=%s", + err, + self.answer_id, + convert_files_to_filenames(student_answers), ) raise Exception(err) # lint-amnesty, pylint: disable=raise-missing-from # We do not support xqueue within Studio. 
if self.capa_system.xqueue is None: cmap = CorrectMap() - cmap.set(self.answer_id, queuestate=None, - msg=_('Error: No grader has been set up for this problem.')) + cmap.set(self.answer_id, queuestate=None, msg=_("Error: No grader has been set up for this problem.")) return cmap # Prepare xqueue request - #------------------------------------------------------------ + # ------------------------------------------------------------ qinterface = self.capa_system.xqueue.interface qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat) @@ -2691,52 +2668,52 @@ def get_score(self, student_answers): ) callback_url = self.capa_system.xqueue.construct_callback() xheader = xqueue_interface.make_xheader( - lms_callback_url=callback_url, - lms_key=queuekey, - queue_name=self.queue_name + lms_callback_url=callback_url, lms_key=queuekey, queue_name=self.queue_name ) # Generate body if is_list_of_files(submission): # TODO: Get S3 pointer from the Queue - self.context.update({'submission': ''}) + self.context.update({"submission": ""}) else: - self.context.update({'submission': submission}) + self.context.update({"submission": submission}) contents = self.payload.copy() # Metadata related to the student submission revealed to the external # grader student_info = { - 'anonymous_student_id': anonymous_student_id, - 'submission_time': qtime, - 'random_seed': self.context['seed'], + "anonymous_student_id": anonymous_student_id, + "submission_time": qtime, + "random_seed": self.context["seed"], } - contents.update({'student_info': json.dumps(student_info)}) + contents.update({"student_info": json.dumps(student_info)}) # Submit request. When successful, 'msg' is the prior length of the # queue if is_list_of_files(submission): # TODO: Is there any information we want to send here? - contents.update({'student_response': ''}) - (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents), - files_to_upload=submission) + contents.update({"student_response": ""}) + (error, msg) = qinterface.send_to_queue( + header=xheader, body=json.dumps(contents), files_to_upload=submission + ) else: - contents.update({'student_response': submission}) - (error, msg) = qinterface.send_to_queue(header=xheader, - body=json.dumps(contents)) + contents.update({"student_response": submission}) + (error, msg) = qinterface.send_to_queue(header=xheader, body=json.dumps(contents)) # State associated with the queueing request - queuestate = {'key': queuekey, - 'time': qtime, } + queuestate = { + "key": queuekey, + "time": qtime, + } cmap = CorrectMap() if error: _ = self.capa_system.i18n.gettext - error_msg = _('Unable to deliver your submission to grader (Reason: {error_msg}).' - ' Please try again later.').format(error_msg=msg) + error_msg = _( + "Unable to deliver your submission to grader (Reason: {error_msg}). Please try again later." 
+ ).format(error_msg=msg) cmap.set(self.answer_id, queuestate=None, msg=error_msg) else: # Queueing mechanism flags: @@ -2745,8 +2722,7 @@ def get_score(self, student_answers): # 2) Frontend: correctness='incomplete' eventually trickles down # through inputtypes.textbox and .filesubmission to inform the # browser to poll the LMS - cmap.set(self.answer_id, queuestate=queuestate, - correctness='incomplete', msg=msg) + cmap.set(self.answer_id, queuestate=queuestate, correctness="incomplete", msg=msg) return cmap @@ -2758,14 +2734,14 @@ def update_score(self, score_msg, oldcmap, queuekey): if not valid_score_msg: # Translators: 'grader' refers to the edX automatic code grader. - error_msg = _('Invalid grader reply. Please contact the course staff.') + error_msg = _("Invalid grader reply. Please contact the course staff.") oldcmap.set(self.answer_id, msg=error_msg) return oldcmap - correctness = 'correct' if correct else 'incorrect' + correctness = "correct" if correct else "incorrect" # TODO: Find out how this is used elsewhere, if any - self.context['correct'] = correctness + self.context["correct"] = correctness # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey # does not match, we keep waiting for the score_msg whose key actually @@ -2776,14 +2752,14 @@ def update_score(self, score_msg, oldcmap, queuekey): points = 0 # Queuestate is consumed oldcmap.set( - self.answer_id, npoints=points, correctness=correctness, - msg=msg.replace(' ', ' '), queuestate=None) - else: - log.debug( - 'CodeResponse: queuekey %s does not match for answer_id=%s.', - queuekey, - self.answer_id + self.answer_id, + npoints=points, + correctness=correctness, + msg=msg.replace(" ", " "), + queuestate=None, ) + else: + log.debug("CodeResponse: queuekey %s does not match for answer_id=%s.", queuekey, self.answer_id) return oldcmap @@ -2811,21 +2787,20 @@ def _parse_score_msg(self, score_msg): score: Points to be assigned (numeric, can be float) msg: Message from grader to display to student (string) """ - fail = (False, False, 0, '') + fail = (False, False, 0, "") try: score_result = json.loads(score_msg) except (TypeError, ValueError): - log.error("External grader message should be a JSON-serialized dict." - " Received score_msg = %s", score_msg) + log.error("External grader message should be a JSON-serialized dict. Received score_msg = %s", score_msg) return fail if not isinstance(score_result, dict): - log.error("External grader message should be a JSON-serialized dict." - " Received score_result = %s", score_result) + log.error( + "External grader message should be a JSON-serialized dict. Received score_result = %s", score_result + ) return fail - for tag in ['correct', 'score', 'msg']: + for tag in ["correct", "score", "msg"]: if tag not in score_result: - log.error("External grader message is missing one or more required" - " tags: 'correct', 'score', 'msg'") + log.error("External grader message is missing one or more required tags: 'correct', 'score', 'msg'") return fail # Next, we need to check that the contents of the external grader message is safe for the LMS. @@ -2833,7 +2808,7 @@ def _parse_score_msg(self, score_msg): # 2) If it is not valid XML, make sure it is valid HTML. # Note: html5lib parser will try to repair any broken HTML # For example: will become . 
- msg = score_result['msg'] + msg = score_result["msg"] try: etree.fromstring(msg) @@ -2842,7 +2817,7 @@ def _parse_score_msg(self, score_msg): # XML parser will raise exception, so wee fallback to html5parser, # which will set empty "" values for such attrs. try: - parsed = html5lib.parseFragment(msg, treebuilder='lxml', namespaceHTMLElements=False) + parsed = html5lib.parseFragment(msg, treebuilder="lxml", namespaceHTMLElements=False) except ValueError: # the parsed message might contain strings that are not # xml compatible, in which case, throw the error message @@ -2850,16 +2825,15 @@ def _parse_score_msg(self, score_msg): if not parsed: log.error( - "Unable to parse external grader message as valid" - " XML: score_msg['msg']=%s", + "Unable to parse external grader message as valid XML: score_msg['msg']=%s", msg, ) return fail - return (True, score_result['correct'], score_result['score'], msg) + return (True, score_result["correct"], score_result["score"], msg) -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- @registry.register @@ -2871,44 +2845,42 @@ class ExternalResponse(LoncapaResponse): """ - human_name = _('External Grader') - tags = ['externalresponse'] - allowed_inputfields = ['textline', 'textbox'] + human_name = _("External Grader") + tags = ["externalresponse"] + allowed_inputfields = ["textline", "textbox"] awdmap = { - 'EXACT_ANS': 'correct', # TODO: handle other loncapa responses - 'WRONG_FORMAT': 'incorrect', + "EXACT_ANS": "correct", # TODO: handle other loncapa responses + "WRONG_FORMAT": "incorrect", } def __init__(self, *args, **kwargs): - self.url = '' + self.url = "" self.tests = [] - self.code = '' + self.code = "" super(ExternalResponse, self).__init__(*args, **kwargs) # lint-amnesty, pylint: disable=super-with-arguments def setup_response(self): xml = self.xml # FIXME - hardcoded URL - self.url = xml.get('url') or "http://qisx.mit.edu:8889/pyloncapa" + self.url = xml.get("url") or "http://qisx.mit.edu:8889/pyloncapa" - answer = xml.find('answer') + answer = xml.find("answer") if answer is not None: - answer_src = answer.get('src') + answer_src = answer.get("src") if answer_src is not None: # TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist. 
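The external grader reply validated by _parse_score_msg above must be a JSON-serialized dict with three required keys; a representative payload, values invented:

    import json

    score_msg = json.dumps({
        "correct": True,  # required: whether the submission passed
        "score": 1,       # required: numeric, may be a float
        "msg": "<p>Your submission passed all tests.</p>",  # required: parsed as XML, then HTML, before display
    })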
- self.code = self.capa_system.filesystem.open('src/' + answer_src).read() + self.code = self.capa_system.filesystem.open("src/" + answer_src).read() else: self.code = answer.text else: # no stanza; get code from - """) + """ + ) # Create the problem problem = new_loncapa_problem(xml_str) @@ -119,16 +124,18 @@ def test_render_script(self): rendered_html = etree.XML(problem.get_html()) # Expect that the script element has been removed from the rendered HTML - script_element = rendered_html.find('script') + script_element = rendered_html.find("script") assert script_element is None def test_render_javascript(self): # Generate some XML with a - """) + """ + ) # Create the problem problem = new_loncapa_problem(xml_str) @@ -137,15 +144,15 @@ def test_render_javascript(self): rendered_html = etree.XML(problem.get_html()) # expect the javascript is still present in the rendered html - assert '' in etree.tostring(rendered_html).decode('utf-8') + assert '' in etree.tostring(rendered_html).decode("utf-8") def test_render_response_xml(self): # Generate some XML for a string response kwargs = { - 'question_text': "Test question", - 'explanation_text': "Test explanation", - 'answer': 'Test answer', - 'hints': [('test prompt', 'test_hint', 'test hint text')] + "question_text": "Test question", + "explanation_text": "Test explanation", + "answer": "Test answer", + "hints": [("test prompt", "test_hint", "test hint text")], } xml_str = StringResponseXMLFactory().build_xml(**kwargs) @@ -158,50 +165,50 @@ def test_render_response_xml(self): problem = new_loncapa_problem(xml_str, capa_system=the_system) rendered_html = etree.XML(problem.get_html()) # Expect problem has been turned into a
<div> - assert rendered_html.tag == 'div' + assert rendered_html.tag == "div" # Expect that the response has been turned into a <div>
with correct attributes - response_element = rendered_html.find('div') + response_element = rendered_html.find("div") - assert response_element.tag == 'div' - assert response_element.attrib['aria-label'] == 'Question 1' + assert response_element.tag == "div" + assert response_element.attrib["aria-label"] == "Question 1" # Expect that the response div.wrapper-problem-response # that contains a <div>
for the textline - textline_element = response_element.find('div') + textline_element = response_element.find("div") - assert textline_element.text == 'Input Template Render' + assert textline_element.text == "Input Template Render" # Expect a child <div>
for the solution # with the rendered template solution_element = rendered_html.xpath('//div[@class="input-template-render"]')[0] - assert solution_element.text == 'Input Template Render' + assert solution_element.text == "Input Template Render" # Expect that the template renderer was called with the correct # arguments, once for the textline input and once for # the solution expected_textline_context = { - 'STATIC_URL': '/dummy-static/', - 'status': the_system.STATUS_CLASS('unsubmitted'), - 'value': '', - 'preprocessor': None, - 'msg': '', - 'inline': False, - 'hidden': False, - 'do_math': False, - 'id': '1_2_1', - 'trailing_text': '', - 'size': None, - 'response_data': {'label': 'Test question', 'descriptions': {}}, - 'describedby_html': HTML('aria-describedby="status_1_2_1"') + "STATIC_URL": "/dummy-static/", + "status": the_system.STATUS_CLASS("unsubmitted"), + "value": "", + "preprocessor": None, + "msg": "", + "inline": False, + "hidden": False, + "do_math": False, + "id": "1_2_1", + "trailing_text": "", + "size": None, + "response_data": {"label": "Test question", "descriptions": {}}, + "describedby_html": HTML('aria-describedby="status_1_2_1"'), } - expected_solution_context = {'id': '1_solution_1'} + expected_solution_context = {"id": "1_solution_1"} expected_calls = [ - mock.call('textline.html', expected_textline_context), - mock.call('solutionspan.html', expected_solution_context), - mock.call('textline.html', expected_textline_context), - mock.call('solutionspan.html', expected_solution_context) + mock.call("textline.html", expected_textline_context), + mock.call("solutionspan.html", expected_solution_context), + mock.call("textline.html", expected_textline_context), + mock.call("solutionspan.html", expected_solution_context), ] assert the_system.render_template.call_args_list == expected_calls @@ -225,28 +232,30 @@ def test_correct_aria_label(self): """ problem = new_loncapa_problem(xml) rendered_html = etree.XML(problem.get_html()) - response_elements = rendered_html.findall('div') - assert response_elements[0].attrib['aria-label'] == 'Question 1' - assert response_elements[1].attrib['aria-label'] == 'Question 2' + response_elements = rendered_html.findall("div") + assert response_elements[0].attrib["aria-label"] == "Question 1" + assert response_elements[1].attrib["aria-label"] == "Question 2" def test_render_response_with_overall_msg(self): # CustomResponse script that sets an overall_message - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(*args): msg = '

Test message 1

Test message 2

' return {'overall_message': msg, 'input_list': [ {'ok': True, 'msg': '' } ] } - """) + """ + ) # Generate some XML for a CustomResponse - kwargs = {'script': script, 'cfn': 'check_func'} + kwargs = {"script": script, "cfn": "check_func"} xml_str = CustomResponseXMLFactory().build_xml(**kwargs) # Create the problem and render the html problem = new_loncapa_problem(xml_str) # Grade the problem - problem.grade_answers({'1_2_1': 'test'}) + problem.grade_answers({"1_2_1": "test"}) # Render the html rendered_html = etree.XML(problem.get_html()) @@ -254,45 +263,49 @@ def check_func(*args): # Expect that there is a
<div> within the response <div>
# with css class response_message msg_div_element = rendered_html.find(".//div[@class='response_message']") - assert msg_div_element.tag == 'div' - assert msg_div_element.get('class') == 'response_message' + assert msg_div_element.tag == "div" + assert msg_div_element.get("class") == "response_message" # Expect that the <div>
contains our message (as part of the XML tree) - msg_p_elements = msg_div_element.findall('p') - assert msg_p_elements[0].tag == 'p' - assert msg_p_elements[0].text == 'Test message 1' + msg_p_elements = msg_div_element.findall("p") + assert msg_p_elements[0].tag == "p" + assert msg_p_elements[0].text == "Test message 1" - assert msg_p_elements[1].tag == 'p' - assert msg_p_elements[1].text == 'Test message 2' + assert msg_p_elements[1].tag == "p" + assert msg_p_elements[1].text == "Test message 2" def test_substitute_python_vars(self): # Generate some XML with Python variables defined in a script # and used later as attributes - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ - """) + """ + ) # Create the problem and render the HTML problem = new_loncapa_problem(xml_str) rendered_html = etree.XML(problem.get_html()) # Expect that the variable $test has been replaced with its value - span_element = rendered_html.find('span') - assert span_element.get('attr') == 'TEST' + span_element = rendered_html.find("span") + assert span_element.get("attr") == "TEST" def test_xml_comments_and_other_odd_things(self): # Comments and processing instructions should be skipped. - xml_str = textwrap.dedent("""\ + xml_str = textwrap.dedent( + """\ - """) + """ + ) # Create the problem problem = new_loncapa_problem(xml_str) diff --git a/xmodule/capa/tests/test_input_templates.py b/xmodule/capa/tests/test_input_templates.py index 3048f62095de..b47f35f2548d 100644 --- a/xmodule/capa/tests/test_input_templates.py +++ b/xmodule/capa/tests/test_input_templates.py @@ -2,7 +2,6 @@ Tests for the logic in input type mako templates. """ - import json import unittest from collections import OrderedDict @@ -11,9 +10,9 @@ from mako import exceptions from six.moves import range +from openedx.core.djangolib.markup import HTML from xmodule.capa.inputtypes import Status from xmodule.capa.tests.helpers import capa_render_template -from openedx.core.djangolib.markup import HTML from xmodule.stringify import stringify_children @@ -21,6 +20,7 @@ class TemplateError(Exception): """ Error occurred while rendering a Mako template. 
""" + pass # lint-amnesty, pylint: disable=unnecessary-pass @@ -36,16 +36,10 @@ class TemplateTestCase(unittest.TestCase): TEMPLATE_NAME = None DESCRIBEDBY = 'aria-describedby="desc-1 desc-2"' DESCRIPTIONS = OrderedDict( - [ - ('desc-1', 'description text 1'), - ('desc-2', 'description text 2') - ] + [("desc-1", "description text 1"), ("desc-2", "description text 2")] ) - DESCRIPTION_IDS = ' '.join(list(DESCRIPTIONS.keys())) - RESPONSE_DATA = { - 'label': 'question text 101', - 'descriptions': DESCRIPTIONS - } + DESCRIPTION_IDS = " ".join(list(DESCRIPTIONS.keys())) + RESPONSE_DATA = {"label": "question text 101", "descriptions": DESCRIPTIONS} def setUp(self): """ @@ -64,7 +58,9 @@ def render_to_xml(self, context_dict): try: xml_str = capa_render_template(self.TEMPLATE_NAME, context_dict) except: - raise TemplateError(exceptions.text_error_template().render()) # lint-amnesty, pylint: disable=raise-missing-from + raise TemplateError( # lint-amnesty, pylint: disable=raise-missing-from + exceptions.text_error_template().render() + ) # Attempt to construct an XML tree from the template # This makes it easy to use XPath to make assertions, rather @@ -74,8 +70,9 @@ def render_to_xml(self, context_dict): try: xml = etree.fromstring("" + xml_str + "") except Exception as exc: - raise TemplateError("Could not parse XML from '{0}': {1}".format( # lint-amnesty, pylint: disable=raise-missing-from - xml_str, str(exc))) + raise TemplateError( # lint-amnesty, pylint: disable=raise-missing-from + "Could not parse XML from '{0}': {1}".format(xml_str, str(exc)) + ) return xml def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1): @@ -87,8 +84,12 @@ def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1): `context` is used to print a debugging message `exact_num` is the exact number of matches to expect. """ - message = ("XML does not have %d match(es) for xpath '%s'\nXML: %s\nContext: %s" - % (exact_num, str(xpath), etree.tostring(xml_root), str(context_dict))) + message = "XML does not have %d match(es) for xpath '%s'\nXML: %s\nContext: %s" % ( + exact_num, + str(xpath), + etree.tostring(xml_root), + str(context_dict), + ) assert len(xml_root.xpath(xpath)) == exact_num, message @@ -116,7 +117,7 @@ def assert_has_text(self, xml_root, xpath, text, exact=True): If no elements are found, the assertion fails. """ element_list = xml_root.xpath(xpath) - assert len(element_list) > 0, ("Could not find element at '%s'\n%s" % (str(xpath), etree.tostring(xml_root))) + assert len(element_list) > 0, "Could not find element at '%s'\n%s" % (str(xpath), etree.tostring(xml_root)) if exact: assert text == element_list[0].text.strip() else: @@ -133,7 +134,7 @@ def assert_description(self, describedby_xpaths): # Verify that each description

tag has correct id, text and order descriptions = OrderedDict( - (tag.get('id'), stringify_children(tag)) for tag in xml.xpath('//p[@class="question-description"]') + (tag.get("id"), stringify_children(tag)) for tag in xml.xpath('//p[@class="question-description"]') ) assert self.DESCRIPTIONS == descriptions @@ -154,7 +155,7 @@ def assert_describedby_attribute(self, describedby_xpaths): Arguments: describedby_xpaths (list): list of xpaths to check aria-describedby attribute """ - self.context['describedby_html'] = '' + self.context["describedby_html"] = "" xml = self.render_to_xml(self.context) # for each xpath verify that description_ids are set correctly @@ -171,15 +172,15 @@ def assert_status(self, status_div=False, status_class=False): status_class (bool): check presence of status class """ cases = [ - ('correct', 'correct'), - ('unsubmitted', 'unanswered'), - ('submitted', 'submitted'), - ('incorrect', 'incorrect'), - ('incomplete', 'incorrect') + ("correct", "correct"), + ("unsubmitted", "unanswered"), + ("submitted", "submitted"), + ("incorrect", "incorrect"), + ("incomplete", "incorrect"), ] for context_status, div_class in cases: - self.context['status'] = Status(context_status) + self.context["status"] = Status(context_status) xml = self.render_to_xml(self.context) # Expect that we get a

with correct class @@ -191,10 +192,8 @@ def assert_status(self, status_div=False, status_class=False): # (used to by CSS to draw the green check / red x) self.assert_has_text( xml, - "//span[@class='status {}']/span[@class='sr']".format( - div_class if status_class else '' - ), - self.context['status'].display_name + "//span[@class='status {}']/span[@class='sr']".format(div_class if status_class else ""), + self.context["status"].display_name, ) def assert_label(self, xpath=None, aria_label=False): @@ -207,33 +206,28 @@ def assert_label(self, xpath=None, aria_label=False): """ labels = [ { - 'actual': "You see, but you do not observe. The distinction is clear.", - 'expected': "You see, but you do not observe. The distinction is clear.", + "actual": "You see, but you do not observe. The distinction is clear.", + "expected": "You see, but you do not observe. The distinction is clear.", }, { - 'actual': "I choose to have faith because without that, I have nothing.", - 'expected': "I choose to have faith because without that, I have nothing.", - } + "actual": "I choose to have faith because without that, I have nothing.", + "expected": "I choose to have faith because without that, I have nothing.", + }, ] - response_data = { - 'response_data': { - 'descriptions': {}, - 'label': '' - } - } + response_data = {"response_data": {"descriptions": {}, "label": ""}} self.context.update(response_data) for label in labels: - self.context['response_data']['label'] = label['actual'] + self.context["response_data"]["label"] = label["actual"] xml = self.render_to_xml(self.context) if aria_label: - self.assert_has_xpath(xml, "//*[@aria-label='%s']" % label['expected'], self.context) + self.assert_has_xpath(xml, "//*[@aria-label='%s']" % label["expected"], self.context) else: element_list = xml.xpath(xpath) assert len(element_list) == 1 - assert stringify_children(element_list[0]) == label['actual'] + assert stringify_children(element_list[0]) == label["actual"] class ChoiceGroupTemplateTest(TemplateTestCase): @@ -241,20 +235,20 @@ class ChoiceGroupTemplateTest(TemplateTestCase): Test mako template for `` input. """ - TEMPLATE_NAME = 'choicegroup.html' + TEMPLATE_NAME = "choicegroup.html" def setUp(self): super(ChoiceGroupTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments - choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')] + choices = [("1", "choice 1"), ("2", "choice 2"), ("3", "choice 3")] self.context = { - 'id': '1', - 'choices': choices, - 'status': Status('correct'), - 'input_type': 'checkbox', - 'name_array_suffix': '1', - 'value': '3', - 'response_data': self.RESPONSE_DATA, - 'describedby_html': HTML(self.DESCRIBEDBY), + "id": "1", + "choices": choices, + "status": Status("correct"), + "input_type": "checkbox", + "name_array_suffix": "1", + "value": "3", + "response_data": self.RESPONSE_DATA, + "describedby_html": HTML(self.DESCRIBEDBY), } def test_problem_marked_correct(self): @@ -263,9 +257,9 @@ def test_problem_marked_correct(self): (not a particular option) is marked correct. 
""" - self.context['status'] = Status('correct') - self.context['input_type'] = 'checkbox' - self.context['value'] = ['1', '2'] + self.context["status"] = Status("correct") + self.context["input_type"] = "checkbox" + self.context["value"] = ["1", "2"] # Should mark the entire problem correct xml = self.render_to_xml(self.context) @@ -273,11 +267,9 @@ def test_problem_marked_correct(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']", self.context) - self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']", self.context) def test_problem_marked_incorrect(self): """ @@ -285,12 +277,13 @@ def test_problem_marked_incorrect(self): (not a particular option) is marked incorrect. """ conditions = [ - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': []}, - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2']}, - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2', '3']}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': []}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2']}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2', '3']}] + {"status": Status("incorrect"), "input_type": "checkbox", "value": []}, + {"status": Status("incorrect"), "input_type": "checkbox", "value": ["2"]}, + {"status": Status("incorrect"), "input_type": "checkbox", "value": ["2", "3"]}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": []}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": ["2"]}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": ["2", "3"]}, + ] for test_conditions in conditions: self.context.update(test_conditions) @@ -299,13 +292,9 @@ def test_problem_marked_incorrect(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - self.assert_no_xpath(xml, - "//label[@class='choicegroup_incorrect']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']", self.context) - self.assert_no_xpath(xml, - "//label[@class='choicegroup_correct']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']", self.context) def test_problem_marked_unsubmitted(self): """ @@ -313,16 +302,17 @@ def test_problem_marked_unsubmitted(self): (not a particular option) is marked unanswered. 
""" conditions = [ - {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': ''}, - {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': []}, - {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': []}, - {'input_type': 'radio', 'value': ''}, - {'input_type': 'radio', 'value': []}, - {'input_type': 'checkbox', 'value': []}, - {'input_type': 'checkbox', 'value': ['1']}, - {'input_type': 'checkbox', 'value': ['1', '2']}] + {"status": Status("unsubmitted"), "input_type": "radio", "value": ""}, + {"status": Status("unsubmitted"), "input_type": "radio", "value": []}, + {"status": Status("unsubmitted"), "input_type": "checkbox", "value": []}, + {"input_type": "radio", "value": ""}, + {"input_type": "radio", "value": []}, + {"input_type": "checkbox", "value": []}, + {"input_type": "checkbox", "value": ["1"]}, + {"input_type": "checkbox", "value": ["1", "2"]}, + ] - self.context['status'] = Status('unanswered') + self.context["status"] = Status("unanswered") for test_conditions in conditions: self.context.update(test_conditions) @@ -331,24 +321,18 @@ def test_problem_marked_unsubmitted(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - self.assert_no_xpath(xml, - "//label[@class='choicegroup_incorrect']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']", self.context) - self.assert_no_xpath(xml, - "//label[@class='choicegroup_correct']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']", self.context) def test_option_marked_correct(self): """ Test conditions under which a particular option and the entire problem is marked correct. """ - conditions = [ - {'input_type': 'radio', 'value': '2'}, - {'input_type': 'radio', 'value': ['2']}] + conditions = [{"input_type": "radio", "value": "2"}, {"input_type": "radio", "value": ["2"]}] - self.context['status'] = Status('correct') + self.context["status"] = Status("correct") for test_conditions in conditions: self.context.update(test_conditions) @@ -365,11 +349,9 @@ def test_option_marked_incorrect(self): Test conditions under which a particular option and the entire problem is marked incorrect. 
""" - conditions = [ - {'input_type': 'radio', 'value': '2'}, - {'input_type': 'radio', 'value': ['2']}] + conditions = [{"input_type": "radio", "value": "2"}, {"input_type": "radio", "value": ["2"]}] - self.context['status'] = Status('incorrect') + self.context["status"] = Status("incorrect") for test_conditions in conditions: self.context.update(test_conditions) @@ -393,19 +375,20 @@ def test_never_show_correctness(self): """ conditions = [ - {'input_type': 'radio', 'status': Status('correct'), 'value': ''}, - {'input_type': 'radio', 'status': Status('correct'), 'value': '2'}, - {'input_type': 'radio', 'status': Status('correct'), 'value': ['2']}, - {'input_type': 'radio', 'status': Status('incorrect'), 'value': '2'}, - {'input_type': 'radio', 'status': Status('incorrect'), 'value': []}, - {'input_type': 'radio', 'status': Status('incorrect'), 'value': ['2']}, - {'input_type': 'checkbox', 'status': Status('correct'), 'value': []}, - {'input_type': 'checkbox', 'status': Status('correct'), 'value': ['2']}, - {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': []}, - {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': ['2']}] - - self.context['show_correctness'] = 'never' - self.context['submitted_message'] = 'Test message' + {"input_type": "radio", "status": Status("correct"), "value": ""}, + {"input_type": "radio", "status": Status("correct"), "value": "2"}, + {"input_type": "radio", "status": Status("correct"), "value": ["2"]}, + {"input_type": "radio", "status": Status("incorrect"), "value": "2"}, + {"input_type": "radio", "status": Status("incorrect"), "value": []}, + {"input_type": "radio", "status": Status("incorrect"), "value": ["2"]}, + {"input_type": "checkbox", "status": Status("correct"), "value": []}, + {"input_type": "checkbox", "status": Status("correct"), "value": ["2"]}, + {"input_type": "checkbox", "status": Status("incorrect"), "value": []}, + {"input_type": "checkbox", "status": Status("incorrect"), "value": ["2"]}, + ] + + self.context["show_correctness"] = "never" + self.context["submitted_message"] = "Test message" for test_conditions in conditions: self.context.update(test_conditions) @@ -419,17 +402,12 @@ def test_never_show_correctness(self): self.assert_no_xpath(xml, xpath, self.context) # Should NOT mark individual options - self.assert_no_xpath(xml, - "//label[@class='choicegroup_incorrect']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']", self.context) - self.assert_no_xpath(xml, - "//label[@class='choicegroup_correct']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']", self.context) # Expect to see the message - self.assert_has_text(xml, "//div[@class='capa_alert']", - self.context['submitted_message']) + self.assert_has_text(xml, "//div[@class='capa_alert']", self.context["submitted_message"]) def test_no_message_before_submission(self): """ @@ -438,22 +416,21 @@ def test_no_message_before_submission(self): """ conditions = [ - {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': ''}, - {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': []}, - {'input_type': 'checkbox', 'status': Status('unsubmitted'), 'value': []}, - + {"input_type": "radio", "status": Status("unsubmitted"), "value": ""}, + {"input_type": "radio", "status": Status("unsubmitted"), "value": []}, + {"input_type": "checkbox", "status": Status("unsubmitted"), "value": []}, # These tests expose bug #365 # When the bug is fixed, uncomment these cases. 
- #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'}, - #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']}, - #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'}, - #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']}, - #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}, - #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}] + # {'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'}, + # {'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']}, + # {'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'}, + # {'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']}, + # {'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}, + # {'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}] ] - self.context['show_correctness'] = 'never' - self.context['submitted_message'] = 'Test message' + self.context["show_correctness"] = "never" + self.context["submitted_message"] = "Test message" for test_conditions in conditions: self.context.update(test_conditions) @@ -472,7 +449,7 @@ def test_description(self): """ Test that correct description information is set on desired elements. """ - xpaths = ['//fieldset/@aria-describedby', '//label/@aria-describedby'] + xpaths = ["//fieldset/@aria-describedby", "//label/@aria-describedby"] self.assert_description(xpaths) self.assert_describedby_attribute(xpaths) @@ -488,27 +465,29 @@ class TextlineTemplateTest(TemplateTestCase): Test mako template for `` input. """ - TEMPLATE_NAME = 'textline.html' + TEMPLATE_NAME = "textline.html" def setUp(self): super(TextlineTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': '1', - 'status': Status('correct'), - 'value': '3', - 'preprocessor': None, - 'trailing_text': None, - 'response_data': self.RESPONSE_DATA, - 'describedby_html': HTML(self.DESCRIBEDBY), + "id": "1", + "status": Status("correct"), + "value": "3", + "preprocessor": None, + "trailing_text": None, + "response_data": self.RESPONSE_DATA, + "describedby_html": HTML(self.DESCRIBEDBY), } def test_section_class(self): - cases = [({}, ' capa_inputtype textline'), - ({'do_math': True}, 'text-input-dynamath capa_inputtype textline'), - ({'inline': True}, ' capa_inputtype inline textline'), - ({'do_math': True, 'inline': True}, 'text-input-dynamath capa_inputtype inline textline'), ] + cases = [ + ({}, " capa_inputtype textline"), + ({"do_math": True}, "text-input-dynamath capa_inputtype textline"), + ({"inline": True}, " capa_inputtype inline textline"), + ({"do_math": True, "inline": True}, "text-input-dynamath capa_inputtype inline textline"), + ] - for (context, css_class) in cases: + for context, css_class in cases: base_context = self.context.copy() base_context.update(context) xml = self.render_to_xml(base_context) @@ -528,7 +507,7 @@ def test_label(self): self.assert_label(xpath="//label[@class='problem-group-label']") def test_hidden(self): - self.context['hidden'] = True + self.context["hidden"] = True xml = self.render_to_xml(self.context) xpath = "//div[@style='display:none;']" @@ -538,7 +517,7 @@ def test_hidden(self): self.assert_has_xpath(xml, xpath, self.context) def test_do_math(self): - self.context['do_math'] = True + self.context["do_math"] = True xml = self.render_to_xml(self.context) xpath = "//input[@class='mw-100 math']" @@ -551,15 +530,14 @@ def test_do_math(self): self.assert_has_xpath(xml, xpath, self.context) def test_size(self): - 
self.context['size'] = '20' + self.context["size"] = "20" xml = self.render_to_xml(self.context) xpath = "//input[@size='20']" self.assert_has_xpath(xml, xpath, self.context) def test_preprocessor(self): - self.context['preprocessor'] = {'class_name': 'test_class', - 'script_src': 'test_script'} + self.context["preprocessor"] = {"class_name": "test_class", "script_src": "test_script"} xml = self.render_to_xml(self.context) xpath = "//div[contains(@class, 'text-input-dynamath_data') and @data-preprocessor='test_class']" @@ -569,24 +547,25 @@ def test_preprocessor(self): self.assert_has_xpath(xml, xpath, self.context) def test_do_inline_and_preprocessor(self): - self.context['preprocessor'] = {'class_name': 'test_class', - 'script_src': 'test_script'} - self.context['inline'] = True + self.context["preprocessor"] = {"class_name": "test_class", "script_src": "test_script"} + self.context["inline"] = True xml = self.render_to_xml(self.context) xpath = "//div[contains(@class, 'text-input-dynamath_data inline') and @data-preprocessor='test_class']" self.assert_has_xpath(xml, xpath, self.context) def test_do_inline(self): - cases = [('correct', 'correct'), - ('unsubmitted', 'unanswered'), - ('incorrect', 'incorrect'), - ('incomplete', 'incorrect')] + cases = [ + ("correct", "correct"), + ("unsubmitted", "unanswered"), + ("incorrect", "incorrect"), + ("incomplete", "incorrect"), + ] - self.context['inline'] = True + self.context["inline"] = True - for (context_status, div_class) in cases: - self.context['status'] = Status(context_status) + for context_status, div_class in cases: + self.context["status"] = Status(context_status) xml = self.render_to_xml(self.context) # Expect that we get a
with correct class @@ -594,17 +573,17 @@ def test_do_inline(self): self.assert_has_xpath(xml, xpath, self.context) def test_message(self): - self.context['msg'] = "Test message" + self.context["msg"] = "Test message" xml = self.render_to_xml(self.context) xpath = "//span[@class='message']" - self.assert_has_text(xml, xpath, self.context['msg']) + self.assert_has_text(xml, xpath, self.context["msg"]) def test_description(self): """ Test that correct description information is set on desired elements. """ - xpaths = ['//input/@aria-describedby'] + xpaths = ["//input/@aria-describedby"] self.assert_description(xpaths) self.assert_describedby_attribute(xpaths) @@ -613,19 +592,20 @@ class FormulaEquationInputTemplateTest(TemplateTestCase): """ Test make template for ``s. """ - TEMPLATE_NAME = 'formulaequationinput.html' + + TEMPLATE_NAME = "formulaequationinput.html" def setUp(self): super(FormulaEquationInputTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': 2, - 'value': 'PREFILLED_VALUE', - 'status': Status('unsubmitted'), - 'previewer': 'file.js', - 'reported_status': 'REPORTED_STATUS', - 'trailing_text': None, - 'response_data': self.RESPONSE_DATA, - 'describedby_html': HTML(self.DESCRIBEDBY), + "id": 2, + "value": "PREFILLED_VALUE", + "status": Status("unsubmitted"), + "previewer": "file.js", + "reported_status": "REPORTED_STATUS", + "trailing_text": None, + "response_data": self.RESPONSE_DATA, + "describedby_html": HTML(self.DESCRIBEDBY), } def test_no_size(self): @@ -633,7 +613,7 @@ def test_no_size(self): self.assert_no_xpath(xml, "//input[@size]", self.context) def test_size(self): - self.context['size'] = '40' + self.context["size"] = "40" xml = self.render_to_xml(self.context) self.assert_has_xpath(xml, "//input[@size='40']", self.context) @@ -642,7 +622,7 @@ def test_description(self): """ Test that correct description information is set on desired elements. """ - xpaths = ['//input/@aria-describedby'] + xpaths = ["//input/@aria-describedby"] self.assert_description(xpaths) self.assert_describedby_attribute(xpaths) @@ -664,25 +644,25 @@ class AnnotationInputTemplateTest(TemplateTestCase): Test mako template for `` input. """ - TEMPLATE_NAME = 'annotationinput.html' + TEMPLATE_NAME = "annotationinput.html" def setUp(self): super(AnnotationInputTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': 2, - 'value': '
<p>Test value</p>',
- 'title': '<p>This is a title</p>',
- 'text': '<p>This is a test.</p>',
- 'comment': '<p>This is a test comment</p>',
- 'comment_prompt': '<p>This is a test comment prompt</p>',
- 'comment_value': '<p>This is the value of a test comment</p>',
- 'tag_prompt': '<p>This is a tag prompt</p>',
- 'options': [],
- 'has_options_value': False,
- 'debug': False,
- 'status': Status('unsubmitted'),
- 'return_to_annotation': False,
- 'msg': '<p>This is a test message</p>',
+ "id": 2,
+ "value": "<p>Test value</p>",
+ "title": "<p>This is a title</p>",
+ "text": "<p>This is a test.</p>",
+ "comment": "<p>This is a test comment</p>",
+ "comment_prompt": "<p>This is a test comment prompt</p>",
+ "comment_value": "<p>This is the value of a test comment</p>",
+ "tag_prompt": "<p>This is a tag prompt</p>",
+ "options": [],
+ "has_options_value": False,
+ "debug": False,
+ "status": Status("unsubmitted"),
+ "return_to_annotation": False,
+ "msg": "<p>This is a test message</p>
", } def test_return_to_annotation(self): @@ -694,12 +674,12 @@ def test_return_to_annotation(self): xpath = "//a[@class='annotation-return']" # If return_to_annotation set, then show the link - self.context['return_to_annotation'] = True + self.context["return_to_annotation"] = True xml = self.render_to_xml(self.context) self.assert_has_xpath(xml, xpath, self.context) # Otherwise, do not show the links - self.context['return_to_annotation'] = False + self.context["return_to_annotation"] = False xml = self.render_to_xml(self.context) self.assert_no_xpath(xml, xpath, self.context) @@ -709,12 +689,11 @@ def test_option_selection(self): """ # Create options 0-4 and select option 2 - self.context['options_value'] = [2] - self.context['options'] = [ - {'id': id_num, - 'choice': 'correct', - 'description': '
<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}
- for id_num in range(5)]
+ self.context["options_value"] = [2]
+ self.context["options"] = [
+ {"id": id_num, "choice": "correct", "description": "<p>Unescaped <b>HTML {0}</b></p>
".format(id_num)} + for id_num in range(5) + ] xml = self.render_to_xml(self.context) @@ -723,11 +702,11 @@ def test_option_selection(self): # Since the HTML is unescaped, we can traverse the XML tree for id_num in range(5): xpath = "//span[@data-id='{0}']/p/b".format(id_num) - self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False) + self.assert_has_text(xml, xpath, "HTML {0}".format(id_num), exact=False) # Expect that the correct option is selected xpath = "//span[contains(@class,'selected')]/p/b" - self.assert_has_text(xml, xpath, 'HTML 2', exact=False) + self.assert_has_text(xml, xpath, "HTML 2", exact=False) def test_submission_status(self): """ @@ -735,12 +714,10 @@ def test_submission_status(self): """ # Test cases of `(input_status, expected_css_class)` tuples - test_cases = [('unsubmitted', 'unanswered'), - ('incomplete', 'incorrect'), - ('incorrect', 'incorrect')] + test_cases = [("unsubmitted", "unanswered"), ("incomplete", "incorrect"), ("incorrect", "incorrect")] - for (input_status, expected_css_class) in test_cases: - self.context['status'] = Status(input_status) + for input_status, expected_css_class in test_cases: + self.context["status"] = Status(input_status) xml = self.render_to_xml(self.context) xpath = "//span[@class='status {0}']".format(expected_css_class) @@ -748,8 +725,8 @@ def test_submission_status(self): # If individual options are being marked, then expect # just the option to be marked incorrect, not the whole problem - self.context['has_options_value'] = True - self.context['status'] = Status('incorrect') + self.context["has_options_value"] = True + self.context["status"] = Status("incorrect") xpath = "//span[@class='incorrect']" xml = self.render_to_xml(self.context) self.assert_no_xpath(xml, xpath, self.context) @@ -758,33 +735,33 @@ def test_display_html_comment(self): """ Test that HTML comment and comment prompt render. """ - self.context['comment'] = "
<p>Unescaped <b>comment HTML</b></p>"
- self.context['comment_prompt'] = "<p>Prompt <b>prompt HTML</b></p>"
- self.context['text'] = "<p>Unescaped <b>text</b></p>"
+ self.context["comment"] = "<p>Unescaped <b>comment HTML</b></p>"
+ self.context["comment_prompt"] = "<p>Prompt <b>prompt HTML</b></p>"
+ self.context["text"] = "<p>Unescaped <b>text</b></p>
" xml = self.render_to_xml(self.context) # Because the HTML is unescaped, we should be able to # descend to the tag xpath = "//div[@class='block']/p/b" - self.assert_has_text(xml, xpath, 'prompt HTML') + self.assert_has_text(xml, xpath, "prompt HTML") xpath = "//div[@class='block block-comment']/p/b" - self.assert_has_text(xml, xpath, 'comment HTML') + self.assert_has_text(xml, xpath, "comment HTML") xpath = "//div[@class='block block-highlight']/p/b" - self.assert_has_text(xml, xpath, 'text') + self.assert_has_text(xml, xpath, "text") def test_display_html_tag_prompt(self): """ Test that HTML tag prompts render. """ - self.context['tag_prompt'] = "
<p>Unescaped <b>HTML</b></p>"
+ self.context["tag_prompt"] = "<p>Unescaped <b>HTML</b></p>
" xml = self.render_to_xml(self.context) # Because the HTML is unescaped, we should be able to # descend to the tag xpath = "//div[@class='block']/p/b" - self.assert_has_text(xml, xpath, 'HTML') + self.assert_has_text(xml, xpath, "HTML") class MathStringTemplateTest(TemplateTestCase): @@ -792,41 +769,39 @@ class MathStringTemplateTest(TemplateTestCase): Test mako template for `` input. """ - TEMPLATE_NAME = 'mathstring.html' + TEMPLATE_NAME = "mathstring.html" def setUp(self): super(MathStringTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments - self.context = {'isinline': False, 'mathstr': '', 'tail': ''} + self.context = {"isinline": False, "mathstr": "", "tail": ""} def test_math_string_inline(self): - self.context['isinline'] = True - self.context['mathstr'] = 'y = ax^2 + bx + c' + self.context["isinline"] = True + self.context["mathstr"] = "y = ax^2 + bx + c" xml = self.render_to_xml(self.context) xpath = "//section[@class='math-string']/span[1]" - self.assert_has_text(xml, xpath, - '[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]') + self.assert_has_text(xml, xpath, "[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]") def test_math_string_not_inline(self): - self.context['isinline'] = False - self.context['mathstr'] = 'y = ax^2 + bx + c' + self.context["isinline"] = False + self.context["mathstr"] = "y = ax^2 + bx + c" xml = self.render_to_xml(self.context) xpath = "//section[@class='math-string']/span[1]" - self.assert_has_text(xml, xpath, - '[mathjax]y = ax^2 + bx + c[/mathjax]') + self.assert_has_text(xml, xpath, "[mathjax]y = ax^2 + bx + c[/mathjax]") def test_tail_html(self): - self.context['tail'] = "
<p>This is some <b>tail</b> <em>HTML</em></p>"
+ self.context["tail"] = "<p>This is some <b>tail</b> <em>HTML</em></p>
" xml = self.render_to_xml(self.context) # HTML from `tail` should NOT be escaped. # We should be able to traverse it as part of the XML tree xpath = "//section[@class='math-string']/span[2]/p/b" - self.assert_has_text(xml, xpath, 'tail') + self.assert_has_text(xml, xpath, "tail") xpath = "//section[@class='math-string']/span[2]/p/em" - self.assert_has_text(xml, xpath, 'HTML') + self.assert_has_text(xml, xpath, "HTML") class OptionInputTemplateTest(TemplateTestCase): @@ -834,26 +809,25 @@ class OptionInputTemplateTest(TemplateTestCase): Test mako template for `` input. """ - TEMPLATE_NAME = 'optioninput.html' + TEMPLATE_NAME = "optioninput.html" def setUp(self): super(OptionInputTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': 2, - 'options': [], - 'status': Status('unsubmitted'), - 'value': 0, - 'default_option_text': 'Select an option', - 'response_data': self.RESPONSE_DATA, - 'describedby_html': HTML(self.DESCRIBEDBY), + "id": 2, + "options": [], + "status": Status("unsubmitted"), + "value": 0, + "default_option_text": "Select an option", + "response_data": self.RESPONSE_DATA, + "describedby_html": HTML(self.DESCRIBEDBY), } def test_select_options(self): # Create options 0-4, and select option 2 - self.context['options'] = [(id_num, 'Option {0}'.format(id_num)) - for id_num in range(5)] - self.context['value'] = 2 + self.context["options"] = [(id_num, "Option {0}".format(id_num)) for id_num in range(5)] + self.context["value"] = 2 xml = self.render_to_xml(self.context) @@ -863,11 +837,11 @@ def test_select_options(self): for id_num in range(5): xpath = "//option[@value='{0}']".format(id_num) - self.assert_has_text(xml, xpath, 'Option {0}'.format(id_num)) + self.assert_has_text(xml, xpath, "Option {0}".format(id_num)) # Should have the correct option selected xpath = "//option[@selected='true']" - self.assert_has_text(xml, xpath, 'Option 2') + self.assert_has_text(xml, xpath, "Option 2") def test_status(self): """ @@ -885,7 +859,7 @@ def test_description(self): """ Test that correct description information is set on desired elements. """ - xpaths = ['//select/@aria-describedby'] + xpaths = ["//select/@aria-describedby"] self.assert_description(xpaths) self.assert_describedby_attribute(xpaths) @@ -895,27 +869,25 @@ class DragAndDropTemplateTest(TemplateTestCase): Test mako template for `` input. 
""" - TEMPLATE_NAME = 'drag_and_drop_input.html' + TEMPLATE_NAME = "drag_and_drop_input.html" def setUp(self): super(DragAndDropTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments - self.context = {'id': 2, - 'drag_and_drop_json': '', - 'value': 0, - 'status': Status('unsubmitted'), - 'msg': ''} + self.context = {"id": 2, "drag_and_drop_json": "", "value": 0, "status": Status("unsubmitted"), "msg": ""} def test_status(self): # Test cases, where each tuple represents # `(input_status, expected_css_class, expected_text)` - test_cases = [('unsubmitted', 'unanswered', 'unanswered'), - ('correct', 'correct', 'correct'), - ('incorrect', 'incorrect', 'incorrect'), - ('incomplete', 'incorrect', 'incomplete')] + test_cases = [ + ("unsubmitted", "unanswered", "unanswered"), + ("correct", "correct", "correct"), + ("incorrect", "incorrect", "incorrect"), + ("incomplete", "incorrect", "incomplete"), + ] - for (input_status, expected_css_class, expected_text) in test_cases: - self.context['status'] = Status(input_status) + for input_status, expected_css_class, expected_text in test_cases: + self.context["status"] = Status(input_status) xml = self.render_to_xml(self.context) # Expect a
with the status @@ -928,57 +900,63 @@ def test_status(self): def test_drag_and_drop_json_html(self): - json_with_html = json.dumps({'test': '
<p>Unescaped <b>HTML</b></p>'})
- self.context['drag_and_drop_json'] = json_with_html
+ json_with_html = json.dumps({"test": "<p>Unescaped <b>HTML</b></p>
"}) + self.context["drag_and_drop_json"] = json_with_html xml = self.render_to_xml(self.context) # Assert that the JSON-encoded string was inserted without # escaping the HTML. We should be able to traverse the XML tree. xpath = "//div[@class='drag_and_drop_problem_json']/p/b" - self.assert_has_text(xml, xpath, 'HTML') + self.assert_has_text(xml, xpath, "HTML") class ChoiceTextGroupTemplateTest(TemplateTestCase): """Test mako template for `` input""" - TEMPLATE_NAME = 'choicetext.html' - VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0', - '1_choiceinput_1_textinput_0': '0'} - EMPTY_DICT = {'1_choiceinput_0_textinput_0': '', - '1_choiceinput_1_textinput_0': ''} - BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0', - '1_choiceinput_1bc': 'choiceinput_1', - '1_choiceinput_0_textinput_0': '0', - '1_choiceinput_1_textinput_0': '0'} - WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1', - '1_choiceinput_0_textinput_0': '0', - '1_choiceinput_1_textinput_0': '0'} + TEMPLATE_NAME = "choicetext.html" + VALUE_DICT = { + "1_choiceinput_0bc": "1_choiceinput_0bc", + "1_choiceinput_0_textinput_0": "0", + "1_choiceinput_1_textinput_0": "0", + } + EMPTY_DICT = {"1_choiceinput_0_textinput_0": "", "1_choiceinput_1_textinput_0": ""} + BOTH_CHOICE_CHECKBOX = { + "1_choiceinput_0bc": "choiceinput_0", + "1_choiceinput_1bc": "choiceinput_1", + "1_choiceinput_0_textinput_0": "0", + "1_choiceinput_1_textinput_0": "0", + } + WRONG_CHOICE_CHECKBOX = { + "1_choiceinput_1bc": "choiceinput_1", + "1_choiceinput_0_textinput_0": "0", + "1_choiceinput_1_textinput_0": "0", + } def setUp(self): super(ChoiceTextGroupTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments choices = [ ( - '1_choiceinput_0bc', + "1_choiceinput_0bc", [ - {'tail_text': '', 'type': 'text', 'value': '', 'contents': ''}, - {'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'}, - ] + {"tail_text": "", "type": "text", "value": "", "contents": ""}, + {"tail_text": "", "type": "textinput", "value": "", "contents": "choiceinput_0_textinput_0"}, + ], ), ( - '1_choiceinput_1bc', + "1_choiceinput_1bc", [ - {'tail_text': '', 'type': 'text', 'value': '', 'contents': ''}, - {'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'}, - ] - ) + {"tail_text": "", "type": "text", "value": "", "contents": ""}, + {"tail_text": "", "type": "textinput", "value": "", "contents": "choiceinput_1_textinput_0"}, + ], + ), ] self.context = { - 'id': '1', - 'choices': choices, - 'status': Status('correct'), - 'input_type': 'radio', - 'value': self.VALUE_DICT, - 'response_data': self.RESPONSE_DATA + "id": "1", + "choices": choices, + "status": Status("correct"), + "input_type": "radio", + "value": self.VALUE_DICT, + "response_data": self.RESPONSE_DATA, } def test_grouping_tag(self): @@ -986,13 +964,13 @@ def test_grouping_tag(self): Tests whether we are using a section or a label to wrap choice elements. 
Section is used for checkbox, so inputting text does not deselect """ - input_tags = ('radio', 'checkbox') - self.context['status'] = Status('correct') + input_tags = ("radio", "checkbox") + self.context["status"] = Status("correct") xpath = "//section[@id='forinput1_choiceinput_0bc']" - self.context['value'] = {} + self.context["value"] = {} for input_type in input_tags: - self.context['input_type'] = input_type + self.context["input_type"] = input_type xml = self.render_to_xml(self.context) self.assert_has_xpath(xml, xpath, self.context) @@ -1000,9 +978,9 @@ def test_problem_marked_correct(self): """Test conditions under which the entire problem (not a particular option) is marked correct""" - self.context['status'] = Status('correct') - self.context['input_type'] = 'checkbox' - self.context['value'] = self.VALUE_DICT + self.context["status"] = Status("correct") + self.context["input_type"] = "checkbox" + self.context["value"] = self.VALUE_DICT # Should mark the entire problem correct xml = self.render_to_xml(self.context) @@ -1010,25 +988,24 @@ def test_problem_marked_correct(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']", self.context) - self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']", - self.context) + self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']", self.context) def test_problem_marked_incorrect(self): """Test all conditions under which the entire problem (not a particular option) is marked incorrect""" - grouping_tags = {'radio': 'label', 'checkbox': 'section'} + grouping_tags = {"radio": "label", "checkbox": "section"} conditions = [ - {'status': Status('incorrect'), 'input_type': 'radio', 'value': {}}, - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX}, - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}, - {'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}, - {'status': Status('incomplete'), 'input_type': 'radio', 'value': {}}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}, - {'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}] + {"status": Status("incorrect"), "input_type": "radio", "value": {}}, + {"status": Status("incorrect"), "input_type": "checkbox", "value": self.WRONG_CHOICE_CHECKBOX}, + {"status": Status("incorrect"), "input_type": "checkbox", "value": self.BOTH_CHOICE_CHECKBOX}, + {"status": Status("incorrect"), "input_type": "checkbox", "value": self.VALUE_DICT}, + {"status": Status("incomplete"), "input_type": "radio", "value": {}}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": self.WRONG_CHOICE_CHECKBOX}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": self.BOTH_CHOICE_CHECKBOX}, + {"status": Status("incomplete"), "input_type": "checkbox", "value": self.VALUE_DICT}, + ] for test_conditions in conditions: self.context.update(test_conditions) @@ -1037,30 +1014,26 @@ def test_problem_marked_incorrect(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - grouping_tag = grouping_tags[test_conditions['input_type']] - 
self.assert_no_xpath(xml, - "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), - self.context) + grouping_tag = grouping_tags[test_conditions["input_type"]] + self.assert_no_xpath(xml, "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), self.context) - self.assert_no_xpath(xml, - "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), - self.context) + self.assert_no_xpath(xml, "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), self.context) def test_problem_marked_unsubmitted(self): """Test all conditions under which the entire problem (not a particular option) is marked unanswered""" - grouping_tags = {'radio': 'label', 'checkbox': 'section'} + grouping_tags = {"radio": "label", "checkbox": "section"} conditions = [ - {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': {}}, - {'status': Status('unsubmitted'), 'input_type': 'radio', 'value': self.EMPTY_DICT}, - {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': {}}, - {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.EMPTY_DICT}, - {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}, - {'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX}, + {"status": Status("unsubmitted"), "input_type": "radio", "value": {}}, + {"status": Status("unsubmitted"), "input_type": "radio", "value": self.EMPTY_DICT}, + {"status": Status("unsubmitted"), "input_type": "checkbox", "value": {}}, + {"status": Status("unsubmitted"), "input_type": "checkbox", "value": self.EMPTY_DICT}, + {"status": Status("unsubmitted"), "input_type": "checkbox", "value": self.VALUE_DICT}, + {"status": Status("unsubmitted"), "input_type": "checkbox", "value": self.BOTH_CHOICE_CHECKBOX}, ] - self.context['status'] = Status('unanswered') + self.context["status"] = Status("unanswered") for test_conditions in conditions: self.context.update(test_conditions) @@ -1069,23 +1042,18 @@ def test_problem_marked_unsubmitted(self): self.assert_has_xpath(xml, xpath, self.context) # Should NOT mark individual options - grouping_tag = grouping_tags[test_conditions['input_type']] - self.assert_no_xpath(xml, - "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), - self.context) + grouping_tag = grouping_tags[test_conditions["input_type"]] + self.assert_no_xpath(xml, "//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag), self.context) - self.assert_no_xpath(xml, - "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), - self.context) + self.assert_no_xpath(xml, "//{0}[@class='choicetextgroup_correct']".format(grouping_tag), self.context) def test_option_marked_correct(self): """Test conditions under which a particular option (not the entire problem) is marked correct.""" - conditions = [ - {'input_type': 'radio', 'value': self.VALUE_DICT}] + conditions = [{"input_type": "radio", "value": self.VALUE_DICT}] - self.context['status'] = Status('correct') + self.context["status"] = Status("correct") for test_conditions in conditions: self.context.update(test_conditions) @@ -1102,10 +1070,9 @@ def test_option_marked_incorrect(self): """Test conditions under which a particular option (not the entire problem) is marked incorrect.""" - conditions = [ - {'input_type': 'radio', 'value': self.VALUE_DICT}] + conditions = [{"input_type": "radio", "value": self.VALUE_DICT}] - self.context['status'] = Status('incorrect') + self.context["status"] = Status("incorrect") for test_conditions in conditions: 
self.context.update(test_conditions) @@ -1128,15 +1095,15 @@ def test_aria_label(self): class ChemicalEquationTemplateTest(TemplateTestCase): """Test mako template for `` input""" - TEMPLATE_NAME = 'chemicalequationinput.html' + TEMPLATE_NAME = "chemicalequationinput.html" def setUp(self): super(ChemicalEquationTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': '1', - 'status': Status('correct'), - 'previewer': 'dummy.js', - 'value': '101', + "id": "1", + "status": Status("correct"), + "previewer": "dummy.js", + "value": "101", } def test_aria_label(self): @@ -1149,24 +1116,24 @@ def test_aria_label(self): class SchematicInputTemplateTest(TemplateTestCase): """Test mako template for `` input""" - TEMPLATE_NAME = 'schematicinput.html' + TEMPLATE_NAME = "schematicinput.html" def setUp(self): super(SchematicInputTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': '1', - 'status': Status('correct'), - 'previewer': 'dummy.js', - 'value': '101', - 'STATIC_URL': '/dummy-static/', - 'msg': '', - 'initial_value': 'two large batteries', - 'width': '100', - 'height': '100', - 'parts': 'resistors, capacitors, and flowers', - 'setup_script': '/dummy-static/js/capa/schematicinput.js', - 'analyses': 'fast, slow, and pink', - 'submit_analyses': 'maybe', + "id": "1", + "status": Status("correct"), + "previewer": "dummy.js", + "value": "101", + "STATIC_URL": "/dummy-static/", + "msg": "", + "initial_value": "two large batteries", + "width": "100", + "height": "100", + "parts": "resistors, capacitors, and flowers", + "setup_script": "/dummy-static/js/capa/schematicinput.js", + "analyses": "fast, slow, and pink", + "submit_analyses": "maybe", } def test_aria_label(self): @@ -1181,25 +1148,25 @@ class CodeinputTemplateTest(TemplateTestCase): Test mako template for `` input """ - TEMPLATE_NAME = 'codeinput.html' + TEMPLATE_NAME = "codeinput.html" def setUp(self): super(CodeinputTemplateTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.context = { - 'id': '1', - 'status': Status('correct'), - 'mode': 'parrot', - 'linenumbers': 'false', - 'rows': '37', - 'cols': '11', - 'tabsize': '7', - 'hidden': '', - 'msg': '', - 'value': 'print "good evening"', - 'aria_label': 'python editor', - 'code_mirror_exit_message': 'Press ESC then TAB or click outside of the code editor to exit', - 'response_data': self.RESPONSE_DATA, - 'describedby': HTML(self.DESCRIBEDBY), + "id": "1", + "status": Status("correct"), + "mode": "parrot", + "linenumbers": "false", + "rows": "37", + "cols": "11", + "tabsize": "7", + "hidden": "", + "msg": "", + "value": 'print "good evening"', + "aria_label": "python editor", + "code_mirror_exit_message": "Press ESC then TAB or click outside of the code editor to exit", + "response_data": self.RESPONSE_DATA, + "describedby": HTML(self.DESCRIBEDBY), } def test_label(self): @@ -1213,4 +1180,4 @@ def test_editor_exit_message(self): Verify that editor exit message is rendered. 
""" xml = self.render_to_xml(self.context) - self.assert_has_text(xml, '//span[@id="cm-editor-exit-message-1"]', self.context['code_mirror_exit_message']) + self.assert_has_text(xml, '//span[@id="cm-editor-exit-message-1"]', self.context["code_mirror_exit_message"]) diff --git a/xmodule/capa/tests/test_inputtypes.py b/xmodule/capa/tests/test_inputtypes.py index 523f303b45e3..c6b2b705adee 100644 --- a/xmodule/capa/tests/test_inputtypes.py +++ b/xmodule/capa/tests/test_inputtypes.py @@ -32,11 +32,11 @@ from pyparsing import ParseException from six.moves import zip +from openedx.core.djangolib.markup import HTML from xmodule.capa import inputtypes from xmodule.capa.checker import DemoSystem from xmodule.capa.tests.helpers import mock_capa_system from xmodule.capa.xqueue_interface import XQUEUE_TIMEOUT -from openedx.core.djangolib.markup import HTML # just a handy shortcut lookup_tag = inputtypes.registry.get_class_for_tag @@ -45,11 +45,8 @@ DESCRIBEDBY = HTML('aria-describedby="status_{status_id} desc-1 desc-2"') # Use TRAILING_TEXT_DESCRIBEDBY when trailing_text is not null TRAILING_TEXT_DESCRIBEDBY = HTML('aria-describedby="trailing_text_{trailing_text_id} status_{status_id} desc-1 desc-2"') -DESCRIPTIONS = OrderedDict([('desc-1', 'description text 1'), ('desc-2', 'description text 2')]) -RESPONSE_DATA = { - 'label': 'question text 101', - 'descriptions': DESCRIPTIONS -} +DESCRIPTIONS = OrderedDict([("desc-1", "description text 1"), ("desc-2", "description text 2")]) +RESPONSE_DATA = {"label": "question text 101", "descriptions": DESCRIPTIONS} def quote_attr(s): @@ -66,27 +63,27 @@ def test_rendering(self): element = etree.fromstring(xml_str) state = { - 'value': 'Down', - 'id': 'sky_input', - 'status': 'answered', - 'default_option_text': 'Select an option', - 'response_data': RESPONSE_DATA + "value": "Down", + "id": "sky_input", + "status": "answered", + "default_option_text": "Select an option", + "response_data": RESPONSE_DATA, } - option_input = lookup_tag('optioninput')(mock_capa_system(), element, state) + option_input = lookup_tag("optioninput")(mock_capa_system(), element, state) context = option_input._get_render_context() # pylint: disable=protected-access - prob_id = 'sky_input' + prob_id = "sky_input" expected = { - 'STATIC_URL': '/dummy-static/', - 'value': 'Down', - 'options': [('Up', 'Up'), ('Down', 'Down'), ('Don\'t know', 'Don\'t know')], - 'status': inputtypes.Status('answered'), - 'msg': '', - 'inline': False, - 'id': prob_id, - 'default_option_text': 'Select an option', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id='sky_input') + "STATIC_URL": "/dummy-static/", + "value": "Down", + "options": [("Up", "Up"), ("Down", "Down"), ("Don't know", "Don't know")], + "status": inputtypes.Status("answered"), + "msg": "", + "inline": False, + "id": prob_id, + "default_option_text": "Select an option", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id="sky_input"), } assert context == expected @@ -101,18 +98,18 @@ def check(input, options): # lint-amnesty, pylint: disable=redefined-builtin expected = [(o, o) for o in options] assert f(input) == expected - check("('a','b')", ['a', 'b']) - check("('a', 'b')", ['a', 'b']) - check("('a b','b')", ['a b', 'b']) - check("('My \"quoted\"place','b')", ['My \"quoted\"place', 'b']) - check("('б','в')", ['б', 'в']) - check("('б', 'в')", ['б', 'в']) - check("('б в','в')", ['б в', 'в']) - check("('Мой \"кавыки\"место','в')", ['Мой \"кавыки\"место', 'в']) + 
check("('a','b')", ["a", "b"]) + check("('a', 'b')", ["a", "b"]) + check("('a b','b')", ["a b", "b"]) + check("('My \"quoted\"place','b')", ['My "quoted"place', "b"]) + check("('б','в')", ["б", "в"]) + check("('б', 'в')", ["б", "в"]) + check("('б в','в')", ["б в", "в"]) + check("('Мой \"кавыки\"место','в')", ['Мой "кавыки"место', "в"]) # check that escaping single quotes with leading backslash (\') properly works # note: actual input by user will be hasn\'t but json parses it as hasn\\'t - check("('hasnt','hasn't')", ['hasnt', 'hasn\'t']) + check("('hasnt','hasn't')", ["hasnt", "hasn't"]) class ChoiceGroupTest(unittest.TestCase): @@ -120,7 +117,9 @@ class ChoiceGroupTest(unittest.TestCase): Test choice groups, radio groups, and checkbox groups """ - def check_group(self, tag, expected_input_type, expected_suffix): # lint-amnesty, pylint: disable=missing-function-docstring + def check_group( + self, tag, expected_input_type, expected_suffix + ): # lint-amnesty, pylint: disable=missing-function-docstring xml_str = """ <{tag}> This is foil One. @@ -128,48 +127,47 @@ def check_group(self, tag, expected_input_type, expected_suffix): # lint-amnest This is foil Three. This is foil Four. - """.format(tag=tag) + """.format( + tag=tag + ) element = etree.fromstring(xml_str) - state = { - 'value': 'foil3', - 'id': 'sky_input', - 'status': 'answered', - 'response_data': RESPONSE_DATA - } + state = {"value": "foil3", "id": "sky_input", "status": "answered", "response_data": RESPONSE_DATA} the_input = lookup_tag(tag)(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access expected = { - 'STATIC_URL': '/dummy-static/', - 'id': 'sky_input', - 'value': 'foil3', - 'status': inputtypes.Status('answered'), - 'msg': '', - 'input_type': expected_input_type, - 'choices': [('foil1', 'This is foil One.'), - ('foil2', 'This is foil Two.'), - ('foil3', 'This is foil Three.'), - ('foil4', 'This is foil Four.'), ], - 'show_correctness': 'always', - 'submitted_message': 'Answer received.', - 'name_array_suffix': expected_suffix, # what is this for?? - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id='sky_input') + "STATIC_URL": "/dummy-static/", + "id": "sky_input", + "value": "foil3", + "status": inputtypes.Status("answered"), + "msg": "", + "input_type": expected_input_type, + "choices": [ + ("foil1", "This is foil One."), + ("foil2", "This is foil Two."), + ("foil3", "This is foil Three."), + ("foil4", "This is foil Four."), + ], + "show_correctness": "always", + "submitted_message": "Answer received.", + "name_array_suffix": expected_suffix, # what is this for?? 
+ "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id="sky_input"), } assert context == expected def test_choicegroup(self): - self.check_group('choicegroup', 'radio', '') + self.check_group("choicegroup", "radio", "") def test_radiogroup(self): - self.check_group('radiogroup', 'radio', '[]') + self.check_group("radiogroup", "radio", "[]") def test_checkboxgroup(self): - self.check_group('checkboxgroup', 'checkbox', '[]') + self.check_group("checkboxgroup", "checkbox", "[]") class JSInputTest(unittest.TestCase): @@ -183,15 +181,15 @@ def test_rendering_default_values(self): """ xml_str = '' expected = { - 'html_file': None, - 'gradefn': "gradefn", - 'get_statefn': None, - 'set_statefn': None, - 'initial_state': None, - 'width': "400", - 'height': "300", - 'title': "Problem Remote Content", - 'sop': None + "html_file": None, + "gradefn": "gradefn", + "get_statefn": None, + "set_statefn": None, + "initial_state": None, + "width": "400", + "height": "300", + "title": "Problem Remote Content", + "sop": None, } self._render_context_test(xml_str, expected) @@ -211,15 +209,15 @@ def test_rendering_provided_values(self): """ expected = { - 'html_file': "https://studio.edx.org/c4x/edX/DemoX/asset/webGLDemo.html", - 'gradefn': "WebGLDemo.getGrade", - 'get_statefn': "WebGLDemo.getState", - 'set_statefn': "WebGLDemo.setState", - 'initial_state': '{"selectedObjects":{"cube":true,"cylinder":false}}', - 'width': "1000", - 'height': "1200", - 'title': "Awesome and fun!", - 'sop': 'false' + "html_file": "https://studio.edx.org/c4x/edX/DemoX/asset/webGLDemo.html", + "gradefn": "WebGLDemo.getGrade", + "get_statefn": "WebGLDemo.getState", + "set_statefn": "WebGLDemo.setState", + "initial_state": '{"selectedObjects":{"cube":true,"cylinder":false}}', + "width": "1000", + "height": "1200", + "title": "Awesome and fun!", + "sop": "false", } self._render_context_test(xml_str, expected) @@ -229,26 +227,23 @@ def _render_context_test(self, xml_str, expected_context): Helper method for testing context based on the provided XML string. 
""" element = etree.fromstring(xml_str) - state = { - 'value': 103, - 'response_data': RESPONSE_DATA - } - the_input = lookup_tag('jsinput')(mock_capa_system(), element, state) + state = {"value": 103, "response_data": RESPONSE_DATA} + the_input = lookup_tag("jsinput")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access full_expected_context = { - 'STATIC_URL': '/dummy-static/', - 'id': 'prob_1_2', - 'status': inputtypes.Status('unanswered'), - 'describedby_html': DESCRIBEDBY.format(status_id='prob_1_2'), - 'msg': "", - 'params': None, - 'jschannel_loader': '/dummy-static/js/capa/src/jschannel.js', - 'jsinput_loader': '/dummy-static/js/capa/src/jsinput.js', - 'saved_state': 103, - 'response_data': RESPONSE_DATA, - 'value': 103 + "STATIC_URL": "/dummy-static/", + "id": "prob_1_2", + "status": inputtypes.Status("unanswered"), + "describedby_html": DESCRIBEDBY.format(status_id="prob_1_2"), + "msg": "", + "params": None, + "jschannel_loader": "/dummy-static/js/capa/src/jschannel.js", + "jsinput_loader": "/dummy-static/js/capa/src/jsinput.js", + "saved_state": 103, + "response_data": RESPONSE_DATA, + "value": 103, } full_expected_context.update(expected_context) @@ -266,28 +261,25 @@ def test_rendering(self): element = etree.fromstring(xml_str) - state = { - 'value': 'BumbleBee', - 'response_data': RESPONSE_DATA - } - the_input = lookup_tag('textline')(mock_capa_system(), element, state) + state = {"value": "BumbleBee", "response_data": RESPONSE_DATA} + the_input = lookup_tag("textline")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'BumbleBee', - 'status': inputtypes.Status('unanswered'), - 'size': size, - 'msg': '', - 'hidden': False, - 'inline': False, - 'do_math': False, - 'trailing_text': '', - 'preprocessor': None, - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "BumbleBee", + "status": inputtypes.Status("unanswered"), + "size": size, + "msg": "", + "hidden": False, + "inline": False, + "do_math": False, + "trailing_text": "", + "preprocessor": None, + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -298,35 +290,34 @@ def test_math_rendering(self): xml_str = """""".format(size=size, pp=preprocessorClass, sc=script) + preprocessorSrc="{sc}"/>""".format( + size=size, pp=preprocessorClass, sc=script + ) element = etree.fromstring(xml_str) - state = { - 'value': 'BumbleBee', - 'response_data': RESPONSE_DATA - } - the_input = lookup_tag('textline')(mock_capa_system(), element, state) + state = {"value": "BumbleBee", "response_data": RESPONSE_DATA} + the_input = lookup_tag("textline")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'BumbleBee', - 'status': inputtypes.Status('unanswered'), - 'size': size, - 'msg': '', - 'hidden': False, - 'inline': False, - 'trailing_text': '', - 'do_math': True, - 'preprocessor': { - 'class_name': preprocessorClass, - 'script_src': script, + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "BumbleBee", + "status": 
inputtypes.Status("unanswered"), + "size": size, + "msg": "", + "hidden": False, + "inline": False, + "trailing_text": "", + "do_math": True, + "preprocessor": { + "class_name": preprocessorClass, + "script_src": script, }, - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -335,43 +326,42 @@ def test_trailing_text_rendering(self): # store (xml_text, expected) trailing_text = [] # standard trailing text - trailing_text.append(('m/s', 'm/s')) + trailing_text.append(("m/s", "m/s")) # unicode trailing text - trailing_text.append(('\xc3', '\xc3')) + trailing_text.append(("\xc3", "\xc3")) # html escaped trailing text # this is the only one we expect to change - trailing_text.append(('a < b', 'a < b')) + trailing_text.append(("a < b", "a < b")) for xml_text, expected_text in trailing_text: xml_str = """""".format(size=size, tt=xml_text) + />""".format( + size=size, tt=xml_text + ) element = etree.fromstring(xml_str) - state = { - 'value': 'BumbleBee', - 'response_data': RESPONSE_DATA - } - the_input = lookup_tag('textline')(mock_capa_system(), element, state) + state = {"value": "BumbleBee", "response_data": RESPONSE_DATA} + the_input = lookup_tag("textline")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'BumbleBee', - 'status': inputtypes.Status('unanswered'), - 'size': size, - 'msg': '', - 'hidden': False, - 'inline': False, - 'do_math': False, - 'trailing_text': expected_text, - 'preprocessor': None, - 'response_data': RESPONSE_DATA, - 'describedby_html': TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "BumbleBee", + "status": inputtypes.Status("unanswered"), + "size": size, + "msg": "", + "hidden": False, + "inline": False, + "do_math": False, + "trailing_text": expected_text, + "preprocessor": None, + "response_data": RESPONSE_DATA, + "describedby_html": TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id), } assert context == expected @@ -388,33 +378,35 @@ def test_rendering(self): xml_str = """""".format(af=allowed_files, - rf=required_files,) + />""".format( + af=allowed_files, + rf=required_files, + ) element = etree.fromstring(xml_str) state = { - 'value': 'BumbleBee.py', - 'status': 'incomplete', - 'feedback': {'message': '3'}, - 'response_data': RESPONSE_DATA + "value": "BumbleBee.py", + "status": "incomplete", + "feedback": {"message": "3"}, + "response_data": RESPONSE_DATA, } - input_class = lookup_tag('filesubmission') + input_class = lookup_tag("filesubmission") the_input = input_class(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'status': inputtypes.Status('queued'), - 'msg': the_input.submitted_msg, - 'value': 'BumbleBee.py', - 'queue_len': '3', - 'allowed_files': '["runme.py", "nooooo.rb", "ohai.java"]', - 'required_files': '["cookies.py"]', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "status": inputtypes.Status("queued"), + "msg": 
the_input.submitted_msg, + "value": "BumbleBee.py", + "queue_len": "3", + "allowed_files": '["runme.py", "nooooo.rb", "ohai.java"]', + "required_files": '["cookies.py"]', + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -427,10 +419,10 @@ class CodeInputTest(unittest.TestCase): def test_rendering(self): mode = "parrot" - linenumbers = 'false' - rows = '37' - cols = '11' - tabsize = '7' + linenumbers = "false" + rows = "37" + cols = "11" + tabsize = "7" xml_str = """""".format(m=mode, c=cols, r=rows, ln=linenumbers, ts=tabsize) + />""".format( + m=mode, c=cols, r=rows, ln=linenumbers, ts=tabsize + ) element = etree.fromstring(xml_str) state = { - 'value': 'print "good evening"', - 'status': 'incomplete', - 'feedback': {'message': '3'}, - 'response_data': RESPONSE_DATA + "value": 'print "good evening"', + "status": "incomplete", + "feedback": {"message": "3"}, + "response_data": RESPONSE_DATA, } - input_class = lookup_tag('codeinput') + input_class = lookup_tag("codeinput") the_input = input_class(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'print "good evening"', - 'status': inputtypes.Status('queued'), - 'msg': the_input.submitted_msg, - 'mode': mode, - 'linenumbers': linenumbers, - 'rows': rows, - 'cols': cols, - 'hidden': '', - 'tabsize': int(tabsize), - 'queue_len': '3', - 'aria_label': '{mode} editor'.format(mode=mode), - 'code_mirror_exit_message': 'Press ESC then TAB or click outside of the code editor to exit', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": 'print "good evening"', + "status": inputtypes.Status("queued"), + "msg": the_input.submitted_msg, + "mode": mode, + "linenumbers": linenumbers, + "rows": rows, + "cols": cols, + "hidden": "", + "tabsize": int(tabsize), + "queue_len": "3", + "aria_label": "{mode} editor".format(mode=mode), + "code_mirror_exit_message": "Press ESC then TAB or click outside of the code editor to exit", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -480,14 +474,15 @@ class MatlabTest(unittest.TestCase): """ Test Matlab input types """ + def setUp(self): super(MatlabTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments - self.rows = '10' - self.cols = '80' - self.tabsize = '4' + self.rows = "10" + self.cols = "80" + self.tabsize = "4" self.mode = "" self.payload = "payload" - self.linenumbers = 'true' + self.linenumbers = "true" self.xml = """ {payload} - """.format(r=self.rows, - c=self.cols, - tabsize=self.tabsize, - m=self.mode, - payload=self.payload, - ln=self.linenumbers) + """.format( + r=self.rows, c=self.cols, tabsize=self.tabsize, m=self.mode, payload=self.payload, ln=self.linenumbers + ) elt = etree.fromstring(self.xml) state = { - 'value': 'print "good evening"', - 'status': 'incomplete', - 'feedback': {'message': '3'}, - 'response_data': {} + "value": 'print "good evening"', + "status": "incomplete", + "feedback": {"message": "3"}, + "response_data": {}, } - self.input_class = lookup_tag('matlabinput') + self.input_class = lookup_tag("matlabinput") self.the_input = self.input_class(mock_capa_system(), elt, state) def test_rendering(self): context = 
self.the_input._get_render_context() # pylint: disable=protected-access expected = { - 'STATIC_URL': '/dummy-static/', - 'id': 'prob_1_2', - 'value': 'print "good evening"', - 'status': inputtypes.Status('queued'), - 'msg': self.the_input.submitted_msg, - 'mode': self.mode, - 'rows': self.rows, - 'cols': self.cols, - 'queue_msg': '', - 'linenumbers': 'true', - 'hidden': '', - 'tabsize': int(self.tabsize), - 'button_enabled': True, - 'queue_len': '3', - 'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js', - 'response_data': {}, - 'describedby_html': HTML('aria-describedby="status_prob_1_2"') + "STATIC_URL": "/dummy-static/", + "id": "prob_1_2", + "value": 'print "good evening"', + "status": inputtypes.Status("queued"), + "msg": self.the_input.submitted_msg, + "mode": self.mode, + "rows": self.rows, + "cols": self.cols, + "queue_msg": "", + "linenumbers": "true", + "hidden": "", + "tabsize": int(self.tabsize), + "button_enabled": True, + "queue_len": "3", + "matlab_editor_js": "/dummy-static/js/vendor/CodeMirror/octave.js", + "response_data": {}, + "describedby_html": HTML('aria-describedby="status_prob_1_2"'), } assert context == expected def test_rendering_with_state(self): state = { - 'value': 'print "good evening"', - 'status': 'incomplete', - 'input_state': {'queue_msg': 'message'}, - 'feedback': {'message': '3'}, - 'response_data': RESPONSE_DATA + "value": 'print "good evening"', + "status": "incomplete", + "input_state": {"queue_msg": "message"}, + "feedback": {"message": "3"}, + "response_data": RESPONSE_DATA, } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'print "good evening"', - 'status': inputtypes.Status('queued'), - 'msg': the_input.submitted_msg, - 'mode': self.mode, - 'rows': self.rows, - 'cols': self.cols, - 'queue_msg': 'message', - 'linenumbers': 'true', - 'hidden': '', - 'tabsize': int(self.tabsize), - 'button_enabled': True, - 'queue_len': '3', - 'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": 'print "good evening"', + "status": inputtypes.Status("queued"), + "msg": the_input.submitted_msg, + "mode": self.mode, + "rows": self.rows, + "cols": self.cols, + "queue_msg": "message", + "linenumbers": "true", + "hidden": "", + "tabsize": int(self.tabsize), + "button_enabled": True, + "queue_len": "3", + "matlab_editor_js": "/dummy-static/js/vendor/CodeMirror/octave.js", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected def test_rendering_when_completed(self): - for status in ['correct', 'incorrect']: + for status in ["correct", "incorrect"]: state = { - 'value': 'print "good evening"', - 'status': status, - 'input_state': {}, - 'response_data': RESPONSE_DATA + "value": 'print "good evening"', + "status": status, + "input_state": {}, + "response_data": RESPONSE_DATA, } elt = etree.fromstring(self.xml) - prob_id = 'prob_1_2' + prob_id = "prob_1_2" the_input = self.input_class(mock_capa_system(), elt, state) context = the_input._get_render_context() # pylint: disable=protected-access expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'print "good 
evening"', - 'status': inputtypes.Status(status), - 'msg': '', - 'mode': self.mode, - 'rows': self.rows, - 'cols': self.cols, - 'queue_msg': '', - 'linenumbers': 'true', - 'hidden': '', - 'tabsize': int(self.tabsize), - 'button_enabled': False, - 'queue_len': '0', - 'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": 'print "good evening"', + "status": inputtypes.Status(status), + "msg": "", + "mode": self.mode, + "rows": self.rows, + "cols": self.cols, + "queue_msg": "", + "linenumbers": "true", + "hidden": "", + "tabsize": int(self.tabsize), + "button_enabled": False, + "queue_len": "0", + "matlab_editor_js": "/dummy-static/js/vendor/CodeMirror/octave.js", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected - @patch('xmodule.capa.inputtypes.time.time', return_value=10) + @patch("xmodule.capa.inputtypes.time.time", return_value=10) def test_rendering_while_queued(self, time): # lint-amnesty, pylint: disable=unused-argument state = { - 'value': 'print "good evening"', - 'status': 'incomplete', - 'input_state': {'queuestate': 'queued', 'queuetime': 5}, - 'response_data': RESPONSE_DATA + "value": 'print "good evening"', + "status": "incomplete", + "input_state": {"queuestate": "queued", "queuetime": 5}, + "response_data": RESPONSE_DATA, } elt = etree.fromstring(self.xml) - prob_id = 'prob_1_2' + prob_id = "prob_1_2" the_input = self.input_class(mock_capa_system(), elt, state) context = the_input._get_render_context() # pylint: disable=protected-access expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'print "good evening"', - 'status': inputtypes.Status('queued'), - 'msg': the_input.submitted_msg, - 'mode': self.mode, - 'rows': self.rows, - 'cols': self.cols, - 'queue_msg': '', - 'linenumbers': 'true', - 'hidden': '', - 'tabsize': int(self.tabsize), - 'button_enabled': True, - 'queue_len': '1', - 'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": 'print "good evening"', + "status": inputtypes.Status("queued"), + "msg": the_input.submitted_msg, + "mode": self.mode, + "rows": self.rows, + "cols": self.cols, + "queue_msg": "", + "linenumbers": "true", + "hidden": "", + "tabsize": int(self.tabsize), + "button_enabled": True, + "queue_len": "1", + "matlab_editor_js": "/dummy-static/js/vendor/CodeMirror/octave.js", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected def test_plot_data(self): - data = {'submission': 'x = 1234;'} + data = {"submission": "x = 1234;"} response = self.the_input.handle_ajax("plot", data) self.the_input.capa_system.xqueue.interface.send_to_queue.assert_called_with(header=ANY, body=ANY) - assert response['success'] - assert self.the_input.input_state['queuekey'] is not None - assert self.the_input.input_state['queuestate'] == 'queued' + assert response["success"] + assert self.the_input.input_state["queuekey"] is not None + assert self.the_input.input_state["queuestate"] == "queued" def test_plot_data_failure(self): - data = {'submission': 'x = 1234;'} - error_message = 'Error message!' + data = {"submission": "x = 1234;"} + error_message = "Error message!" 
self.the_input.capa_system.xqueue.interface.send_to_queue.return_value = (1, error_message) response = self.the_input.handle_ajax("plot", data) - assert not response['success'] - assert response['message'] == error_message - assert 'queuekey' not in self.the_input.input_state - assert 'queuestate' not in self.the_input.input_state + assert not response["success"] + assert response["message"] == error_message + assert "queuekey" not in self.the_input.input_state + assert "queuestate" not in self.the_input.input_state - @patch('xmodule.capa.inputtypes.time.time', return_value=10) + @patch("xmodule.capa.inputtypes.time.time", return_value=10) def test_ungraded_response_success(self, time): # lint-amnesty, pylint: disable=unused-argument - queuekey = 'abcd' - input_state = {'queuekey': queuekey, 'queuestate': 'queued', 'queuetime': 5} - state = {'value': 'print "good evening"', - 'status': 'incomplete', - 'input_state': input_state, - 'feedback': {'message': '3'}, } + queuekey = "abcd" + input_state = {"queuekey": queuekey, "queuestate": "queued", "queuetime": 5} + state = { + "value": 'print "good evening"', + "status": "incomplete", + "input_state": input_state, + "feedback": {"message": "3"}, + } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) - inner_msg = 'hello!' - queue_msg = json.dumps({'msg': inner_msg}) + inner_msg = "hello!" + queue_msg = json.dumps({"msg": inner_msg}) the_input.ungraded_response(queue_msg, queuekey) - assert input_state['queuekey'] is None - assert input_state['queuestate'] is None - assert input_state['queue_msg'] == inner_msg + assert input_state["queuekey"] is None + assert input_state["queuestate"] is None + assert input_state["queue_msg"] == inner_msg - @patch('xmodule.capa.inputtypes.time.time', return_value=10) + @patch("xmodule.capa.inputtypes.time.time", return_value=10) def test_ungraded_response_key_mismatch(self, time): # lint-amnesty, pylint: disable=unused-argument - queuekey = 'abcd' - input_state = {'queuekey': queuekey, 'queuestate': 'queued', 'queuetime': 5} - state = {'value': 'print "good evening"', - 'status': 'incomplete', - 'input_state': input_state, - 'feedback': {'message': '3'}, } + queuekey = "abcd" + input_state = {"queuekey": queuekey, "queuestate": "queued", "queuetime": 5} + state = { + "value": 'print "good evening"', + "status": "incomplete", + "input_state": input_state, + "feedback": {"message": "3"}, + } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) - inner_msg = 'hello!' - queue_msg = json.dumps({'msg': inner_msg}) + inner_msg = "hello!" 
+ queue_msg = json.dumps({"msg": inner_msg}) - the_input.ungraded_response(queue_msg, 'abc') - assert input_state['queuekey'] == queuekey - assert input_state['queuestate'] == 'queued' - assert 'queue_msg' not in input_state + the_input.ungraded_response(queue_msg, "abc") + assert input_state["queuekey"] == queuekey + assert input_state["queuestate"] == "queued" + assert "queue_msg" not in input_state - @patch('xmodule.capa.inputtypes.time.time', return_value=20) + @patch("xmodule.capa.inputtypes.time.time", return_value=20) def test_matlab_response_timeout_not_exceeded(self, time): # lint-amnesty, pylint: disable=unused-argument - state = {'input_state': {'queuestate': 'queued', 'queuetime': 5}} + state = {"input_state": {"queuestate": "queued", "queuetime": 5}} elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) - assert the_input.status == 'queued' + assert the_input.status == "queued" - @patch('xmodule.capa.inputtypes.time.time', return_value=45) + @patch("xmodule.capa.inputtypes.time.time", return_value=45) def test_matlab_response_timeout_exceeded(self, time): # lint-amnesty, pylint: disable=unused-argument - state = {'input_state': {'queuestate': 'queued', 'queuetime': 5}} + state = {"input_state": {"queuestate": "queued", "queuetime": 5}} elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) - assert the_input.status == 'unsubmitted' - assert the_input.msg == 'No response from Xqueue within {} seconds. Aborted.'.format(XQUEUE_TIMEOUT) + assert the_input.status == "unsubmitted" + assert the_input.msg == "No response from Xqueue within {} seconds. Aborted.".format(XQUEUE_TIMEOUT) - @patch('xmodule.capa.inputtypes.time.time', return_value=20) + @patch("xmodule.capa.inputtypes.time.time", return_value=20) def test_matlab_response_migration_of_queuetime(self, time): # lint-amnesty, pylint: disable=unused-argument """ Test if problem was saved before queuetime was introduced. """ - state = {'input_state': {'queuestate': 'queued'}} + state = {"input_state": {"queuestate": "queued"}} elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) - assert the_input.status == 'unsubmitted' + assert the_input.status == "unsubmitted" def test_matlab_api_key(self): """ @@ -732,28 +728,28 @@ def test_matlab_api_key(self): """ elt = etree.fromstring(self.xml) system = mock_capa_system() - system.matlab_api_key = 'test_api_key' - the_input = lookup_tag('matlabinput')(system, elt, {}) + system.matlab_api_key = "test_api_key" + the_input = lookup_tag("matlabinput")(system, elt, {}) - data = {'submission': 'x = 1234;'} + data = {"submission": "x = 1234;"} response = the_input.handle_ajax("plot", data) # lint-amnesty, pylint: disable=unused-variable - body = system.xqueue.interface.send_to_queue.call_args[1]['body'] + body = system.xqueue.interface.send_to_queue.call_args[1]["body"] payload = json.loads(body) - assert 'test_api_key' == payload['token'] - assert '2' == payload['endpoint_version'] + assert "test_api_key" == payload["token"] + assert "2" == payload["endpoint_version"] def test_get_html(self): # usual output output = self.the_input.get_html() - output_string = etree.tostring(output).decode('utf-8') - assert output_string.startswith('
<div>{') - assert output_string.endswith('}</div>') - output_string = output_string.replace('}</div>', '') - output_string = output_string.replace('<div>{', '') - output_list = output_string.split(',') + output_string = etree.tostring(output).decode("utf-8") + assert output_string.startswith("<div>{") + assert output_string.endswith("}</div>") + output_string = output_string.replace("}</div>", "") + output_string = output_string.replace("<div>
{", "") + output_list = output_string.split(",") for index, value in enumerate(output_list): - output_list[index] = value.replace('u\'', '\'').strip() + output_list[index] = value.replace("u'", "'").strip() expected_string = """ \'matlab_editor_js\': \'/dummy-static/js/vendor/CodeMirror/octave.js\', @@ -763,19 +759,24 @@ def test_get_html(self): \'id\': \'prob_1_2\', \'queue_len\': \'3\', \'tabsize\': 4, \'STATIC_URL\': \'/dummy-static/\', \'linenumbers\': \'true\', \'cols\': \'80\', \'button_enabled\': True, \'rows\': \'10\', \'describedby_html\': Markup(\'aria-describedby="status_prob_1_2"\')""" - expected_list = (textwrap.dedent(expected_string).replace('\n', ' ').strip()).split(',') + expected_list = (textwrap.dedent(expected_string).replace("\n", " ").strip()).split(",") for index, value in enumerate(expected_list): - expected_list[index] = value.replace('u\'', '\'').strip() + expected_list[index] = value.replace("u'", "'").strip() six.assertCountEqual(self, output_list, expected_list) # test html, that is correct HTML5 html, but is not parsable by XML parser. old_render_template = self.the_input.capa_system.render_template - self.the_input.capa_system.render_template = lambda *args: textwrap.dedent(""" -
<div class='matlabResponse'><div id='mwAudioPlaceHolder'> - <audio autobuffer controls autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio> - <div>Right click <a href='https://endpoint.mss-mathworks.com/media/filename.wav'>here</a> and click \"Save As\" to download the file</div></div> - <div class='isInlineWithAbove'>{}</div> - """).replace('\n', '') + self.the_input.capa_system.render_template = lambda *args: textwrap.dedent( + ( + "<div class='matlabResponse'><div id='mwAudioPlaceHolder'>" + "<audio autobuffer controls autoplay src='data:audio/wav;base64='>Audio is not supported on this browser.</audio>" + "<div>Right click " + "<a href='https://endpoint.mss-mathworks.com/media/filename.wav'>here</a> " + 'and click "Save As" to download the file</div></div>' + "<div class='isInlineWithAbove'>{}</div>
      " + ) + ).replace("\n", "") output = self.the_input.get_html() elements = [] @@ -785,18 +786,18 @@ def test_get_html(self): elements.append(element) element_tags.append(element.tag) element_keys.append(element.keys()) - assert element_tags.count('div') == 4 - assert element_tags.count('audio') == 1 - audio_index = element_tags.index('audio') - - six.assertCountEqual(self, element_keys[audio_index], ['autobuffer', 'controls', 'autoplay', 'src']) - assert elements[audio_index].get('src') == 'data:audio/wav;base64=' - assert elements[audio_index].text == 'Audio is not supported on this browser.' - href_index = element_keys.index(['href']) - assert elements[href_index].get('href') == 'https://endpoint.mss-mathworks.com/media/filename.wav' - id_index = element_keys.index(['id']) - assert elements[id_index].get('id') == 'mwAudioPlaceHolder' - output_string = etree.tostring(output).decode('utf-8') + assert element_tags.count("div") == 4 + assert element_tags.count("audio") == 1 + audio_index = element_tags.index("audio") + + six.assertCountEqual(self, element_keys[audio_index], ["autobuffer", "controls", "autoplay", "src"]) + assert elements[audio_index].get("src") == "data:audio/wav;base64=" + assert elements[audio_index].text == "Audio is not supported on this browser." + href_index = element_keys.index(["href"]) + assert elements[href_index].get("href") == "https://endpoint.mss-mathworks.com/media/filename.wav" + id_index = element_keys.index(["id"]) + assert elements[id_index].get("id") == "mwAudioPlaceHolder" + output_string = etree.tostring(output).decode("utf-8") # check that exception is raised during parsing for html. self.the_input.capa_system.render_template = lambda *args: "
      if Conditionally execute statements. - The general form of the if statement is - - if expression - statements - ELSEIF expression - statements - ELSE - statements - END - - The statements are executed if the real part of the expression - has all non-zero elements. The ELSE and ELSEIF parts are optional. - Zero or more ELSEIF parts can be used as well as nested if's. - The expression is usually of the form expr rop expr where - rop is ==, <, >, <=, >=, or ~=. - - - Example - if I == J - A(I,J) = 2; - elseif abs(I-J) == 1 - A(I,J) = -1; - else - A(I,J) = 0; - end - - See also relop, else, elseif, end, for, while, switch. - - Reference page in Help browser - doc if - -
        - """) + queue_msg = textwrap.dedent( + """ +
        +
        + if Conditionally execute statements. + The general form of the if statement is + + if expression + statements + ELSEIF expression + statements + ELSE + statements + END + + The statements are executed if the real part of the expression + has all non-zero elements. The ELSE and ELSEIF parts are optional. + Zero or more ELSEIF parts can be used as well as nested if's. + The expression is usually of the form expr rop expr where + rop is ==, <, >, <=, >=, or ~=. + + + Example + if I == J + A(I,J) = 2; + elseif abs(I-J) == 1 + A(I,J) = -1; + else + A(I,J) = 0; + end + + See also relop, + else, + elseif, + end, + for, + while, + switch. + + Reference page in Help browser + doc if + +
        +
          +
          + """ + ) state = { - 'value': 'print "good evening"', - 'status': 'incomplete', - 'input_state': {'queue_msg': queue_msg}, - 'feedback': {'message': '3'}, - 'response_data': RESPONSE_DATA + "value": 'print "good evening"', + "status": "incomplete", + "input_state": {"queue_msg": queue_msg}, + "feedback": {"message": "3"}, + "response_data": RESPONSE_DATA, } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) context = the_input._get_render_context() # pylint: disable=protected-access self.maxDiff = None - expected = fromstring('\n
          if Conditionally execute statements.\nThe general form of the if statement is\n\n if expression\n statements\n ELSEIF expression\n statements\n ELSE\n statements\n END\n\nThe statements are executed if the real part of the expression \nhas all non-zero elements. The ELSE and ELSEIF parts are optional.\nZero or more ELSEIF parts can be used as well as nested if\'s.\nThe expression is usually of the form expr rop expr where \nrop is ==, <, >, <=, >=, or ~=.\n\n\nExample\n if I == J\n A(I,J) = 2;\n elseif abs(I-J) == 1\n A(I,J) = -1;\n else\n A(I,J) = 0;\n end\n\nSee also relop, else, elseif, end, for, while, switch.\n\nReference page in Help browser\n doc if\n\n
            \n') # lint-amnesty, pylint: disable=line-too-long - received = fromstring(context['queue_msg']) + expected = fromstring( + '\n
            if Conditionally execute statements.\nThe general form of the if statement is\n\n if expression\n statements\n ELSEIF expression\n statements\n ELSE\n statements\n END\n\nThe statements are executed if the real part of the expression \nhas all non-zero elements. The ELSE and ELSEIF parts are optional.\nZero or more ELSEIF parts can be used as well as nested if\'s.\nThe expression is usually of the form expr rop expr where \nrop is ==, <, >, <=, >=, or ~=.\n\n\nExample\n if I == J\n A(I,J) = 2;\n elseif abs(I-J) == 1\n A(I,J) = -1;\n else\n A(I,J) = 0;\n end\n\nSee also relop, else, elseif, end, for, while, switch.\n\nReference page in Help browser\n doc if\n\n
              \n' # lint-amnesty, pylint: disable=line-too-long + ) + received = fromstring(context["queue_msg"]) html_tree_equal(received, expected) def test_rendering_with_invalid_queue_msg(self): - self.the_input.queue_msg = ("
              " # lint-amnesty, pylint: disable=line-too-long - "\nans =\n\n\u0002\n\n
                ") + self.the_input.queue_msg = ( + "
                " # lint-amnesty, pylint: disable=line-too-long + "\nans =\n\n\u0002\n\n
                  " + ) context = self.the_input._get_render_context() # pylint: disable=protected-access self.maxDiff = None - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'print "good evening"', - 'status': inputtypes.Status('queued'), - 'msg': self.the_input.submitted_msg, - 'mode': self.mode, - 'rows': self.rows, - 'cols': self.cols, - 'queue_msg': "Error running code.", - 'linenumbers': 'true', - 'hidden': '', - 'tabsize': int(self.tabsize), - 'button_enabled': True, - 'queue_len': '3', - 'matlab_editor_js': '/dummy-static/js/vendor/CodeMirror/octave.js', - 'response_data': {}, - 'describedby_html': 'aria-describedby="status_{id}"'.format(id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": 'print "good evening"', + "status": inputtypes.Status("queued"), + "msg": self.the_input.submitted_msg, + "mode": self.mode, + "rows": self.rows, + "cols": self.cols, + "queue_msg": "Error running code.", + "linenumbers": "true", + "hidden": "", + "tabsize": int(self.tabsize), + "button_enabled": True, + "queue_len": "3", + "matlab_editor_js": "/dummy-static/js/vendor/CodeMirror/octave.js", + "response_data": {}, + "describedby_html": 'aria-describedby="status_{id}"'.format(id=prob_id), } assert context == expected self.the_input.capa_system.render_template = DemoSystem().render_template @@ -893,12 +910,12 @@ def test_matlab_queue_message_allowed_tags(self): """ Test allowed tags. """ - allowed_tags = ['div', 'p', 'audio', 'pre', 'span'] + allowed_tags = ["div", "p", "audio", "pre", "span"] for tag in allowed_tags: queue_msg = "<{0}>Test message".format(tag) state = { - 'input_state': {'queue_msg': queue_msg}, - 'status': 'queued', + "input_state": {"queue_msg": queue_msg}, + "status": "queued", } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) @@ -908,11 +925,11 @@ def test_matlab_queue_message_not_allowed_tag(self): """ Test not allowed tag. """ - not_allowed_tag = 'script' + not_allowed_tag = "script" queue_msg = "<{0}>Test message".format(not_allowed_tag) state = { - 'input_state': {'queue_msg': queue_msg}, - 'status': 'queued', + "input_state": {"queue_msg": queue_msg}, + "status": "queued", } elt = etree.fromstring(self.xml) the_input = self.input_class(mock_capa_system(), elt, state) @@ -923,17 +940,17 @@ def test_matlab_sanitize_msg(self): """ Check that the_input.msg is sanitized. """ - not_allowed_tag = 'script' + not_allowed_tag = "script" self.the_input.msg = "<{0}>Test message".format(not_allowed_tag) expected = "" - assert self.the_input._get_render_context()['msg'] == expected # pylint: disable=protected-access + assert self.the_input._get_render_context()["msg"] == expected # pylint: disable=protected-access def html_tree_equal(received, expected): """ Returns whether two etree Elements are the same, with insensitivity to attribute order. 
""" - for attr in ('tag', 'attrib', 'text', 'tail'): + for attr in ("tag", "attrib", "text", "tail"): if getattr(received, attr) != getattr(expected, attr): return False if len(received) != len(expected): @@ -947,13 +964,14 @@ class SchematicTest(unittest.TestCase): """ Check that schematic inputs work """ + def test_rendering(self): - height = '12' - width = '33' - parts = 'resistors, capacitors, and flowers' - analyses = 'fast, slow, and pink' - initial_value = 'two large batteries' - submit_analyses = 'maybe' + height = "12" + width = "33" + parts = "resistors, capacitors, and flowers" + analyses = "fast, slow, and pink" + initial_value = "two large batteries" + submit_analyses = "maybe" xml_str = """""".format(h=height, w=width, p=parts, a=analyses, - iv=initial_value, sa=submit_analyses) + />""".format( + h=height, w=width, p=parts, a=analyses, iv=initial_value, sa=submit_analyses + ) element = etree.fromstring(xml_str) - value = 'three resistors and an oscilating pendulum' - state = { - 'value': value, - 'status': 'unsubmitted', - 'response_data': RESPONSE_DATA - } + value = "three resistors and an oscilating pendulum" + state = {"value": value, "status": "unsubmitted", "response_data": RESPONSE_DATA} - the_input = lookup_tag('schematic')(mock_capa_system(), element, state) + the_input = lookup_tag("schematic")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': value, - 'status': inputtypes.Status('unsubmitted'), - 'msg': '', - 'initial_value': initial_value, - 'width': width, - 'height': height, - 'parts': parts, - 'setup_script': '/dummy-static/js/capa/schematicinput.js', - 'analyses': analyses, - 'submit_analyses': submit_analyses, - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": value, + "status": inputtypes.Status("unsubmitted"), + "msg": "", + "initial_value": initial_value, + "width": width, + "height": height, + "parts": parts, + "setup_script": "/dummy-static/js/capa/schematicinput.js", + "analyses": analyses, + "submit_analyses": submit_analyses, + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -1002,97 +1017,95 @@ class ImageInputTest(unittest.TestCase): """ Check that image inputs work """ + def check(self, value, egx, egy): # lint-amnesty, pylint: disable=missing-function-docstring - height = '78' - width = '427' - src = 'http://www.edx.org/cowclicker.jpg' + height = "78" + width = "427" + src = "http://www.edx.org/cowclicker.jpg" xml_str = """""".format(s=src, h=height, w=width) + />""".format( + s=src, h=height, w=width + ) element = etree.fromstring(xml_str) - state = { - 'value': value, - 'status': 'unsubmitted', - 'response_data': RESPONSE_DATA - } + state = {"value": value, "status": "unsubmitted", "response_data": RESPONSE_DATA} - the_input = lookup_tag('imageinput')(mock_capa_system(), element, state) + the_input = lookup_tag("imageinput")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': value, - 'status': inputtypes.Status('unsubmitted'), - 'width': width, - 'height': height, - 'src': src, - 'gx': egx, - 'gy': egy, - 
'msg': '', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": value, + "status": inputtypes.Status("unsubmitted"), + "width": width, + "height": height, + "src": src, + "gx": egx, + "gy": egy, + "msg": "", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected def test_with_value(self): # Check that compensating for the dot size works properly. - self.check('[50,40]', 35, 25) + self.check("[50,40]", 35, 25) def test_without_value(self): - self.check('', 0, 0) + self.check("", 0, 0) def test_corrupt_values(self): - self.check('[12', 0, 0) - self.check('[12, a]', 0, 0) - self.check('[12 10]', 0, 0) - self.check('[12]', 0, 0) - self.check('[12 13 14]', 0, 0) + self.check("[12", 0, 0) + self.check("[12, a]", 0, 0) + self.check("[12 10]", 0, 0) + self.check("[12]", 0, 0) + self.check("[12 13 14]", 0, 0) class CrystallographyTest(unittest.TestCase): """ Check that crystallography inputs work """ + def test_rendering(self): - height = '12' - width = '33' + height = "12" + width = "33" xml_str = """""".format(h=height, w=width) + />""".format( + h=height, w=width + ) element = etree.fromstring(xml_str) - value = 'abc' - state = { - 'value': value, - 'status': 'unsubmitted', - 'response_data': RESPONSE_DATA - } + value = "abc" + state = {"value": value, "status": "unsubmitted", "response_data": RESPONSE_DATA} - the_input = lookup_tag('crystallography')(mock_capa_system(), element, state) + the_input = lookup_tag("crystallography")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': value, - 'status': inputtypes.Status('unsubmitted'), - 'msg': '', - 'width': width, - 'height': height, - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": value, + "status": inputtypes.Status("unsubmitted"), + "msg": "", + "width": width, + "height": height, + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -1102,9 +1115,10 @@ class VseprTest(unittest.TestCase): """ Check that vsepr inputs work """ + def test_rendering(self): - height = '12' - width = '33' + height = "12" + width = "33" molecules = "H2O, C2O" geometries = "AX12,TK421" @@ -1113,33 +1127,31 @@ def test_rendering(self): width="{w}" molecules="{m}" geometries="{g}" - />""".format(h=height, w=width, m=molecules, g=geometries) + />""".format( + h=height, w=width, m=molecules, g=geometries + ) element = etree.fromstring(xml_str) - value = 'abc' - state = { - 'value': value, - 'status': 'unsubmitted', - 'response_data': RESPONSE_DATA - } + value = "abc" + state = {"value": value, "status": "unsubmitted", "response_data": RESPONSE_DATA} - the_input = lookup_tag('vsepr_input')(mock_capa_system(), element, state) + the_input = lookup_tag("vsepr_input")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': value, - 'status': inputtypes.Status('unsubmitted'), - 'msg': '', - 'width': width, - 'height': height, - 'molecules': molecules, - 'geometries': geometries, 
- 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": value, + "status": inputtypes.Status("unsubmitted"), + "msg": "", + "width": width, + "height": height, + "molecules": molecules, + "geometries": geometries, + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -1149,6 +1161,7 @@ class ChemicalEquationTest(unittest.TestCase): """ Check that chemical equation inputs work. """ + def setUp(self): super(ChemicalEquationTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.size = "42" @@ -1156,28 +1169,25 @@ def setUp(self): element = etree.fromstring(xml_str) - state = { - 'value': 'H2OYeah', - 'response_data': RESPONSE_DATA - } - self.the_input = lookup_tag('chemicalequationinput')(mock_capa_system(), element, state) + state = {"value": "H2OYeah", "response_data": RESPONSE_DATA} + self.the_input = lookup_tag("chemicalequationinput")(mock_capa_system(), element, state) def test_rendering(self): """ Verify that the render context matches the expected render context """ context = self.the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'H2OYeah', - 'status': inputtypes.Status('unanswered'), - 'msg': '', - 'size': self.size, - 'previewer': '/dummy-static/js/capa/chemical_equation_preview.js', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "H2OYeah", + "status": inputtypes.Status("unanswered"), + "msg": "", + "size": self.size, + "previewer": "/dummy-static/js/capa/chemical_equation_preview.js", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -1185,12 +1195,12 @@ def test_chemcalc_ajax_sucess(self): """ Verify that using the correct dispatch and valid data produces a valid response """ - data = {'formula': "H"} + data = {"formula": "H"} response = self.the_input.handle_ajax("preview_chemcalc", data) - assert 'preview' in response - assert response['preview'] != '' - assert response['error'] == '' + assert "preview" in response + assert response["preview"] != "" + assert response["error"] == "" def test_ajax_bad_method(self): """ @@ -1204,46 +1214,39 @@ def test_ajax_no_formula(self): When we ask for a formula rendering, there should be an error if no formula """ response = self.the_input.handle_ajax("preview_chemcalc", {}) - assert 'error' in response - assert response['error'] == 'No formula specified.' + assert "error" in response + assert response["error"] == "No formula specified." 
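# Aside (illustration, not part of the diff): the assertions above and below pin
# down ChemicalEquationInput's AJAX contract: handle_ajax() routes on a dispatch
# string and replies with a dict carrying 'preview' and 'error' keys. A minimal
# sketch of a handler that would satisfy them, assuming the method shape and the
# chemcalc renderer hook; only the error strings are taken from the tests:
def handle_ajax(self, dispatch, data):
    """Route one AJAX request by dispatch name (sketch, not the real code)."""
    if dispatch != "preview_chemcalc":
        return {}  # behavior for unknown dispatches is assumed, not shown by the tests
    if "formula" not in data:
        return {"error": "No formula specified."}
    try:
        # The tests patch xmodule.capa.inputtypes.chemcalc.render_to_html,
        # so that is the renderer this sketch calls.
        return {"preview": chemcalc.render_to_html(data["formula"]), "error": ""}
    except ParseException:
        return {"error": "Couldn't parse formula"}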
def test_ajax_parse_err(self): """ With parse errors, ChemicalEquationInput should give an error message """ # Simulate answering a problem that raises the exception - with patch('xmodule.capa.inputtypes.chemcalc.render_to_html') as mock_render: + with patch("xmodule.capa.inputtypes.chemcalc.render_to_html") as mock_render: mock_render.side_effect = ParseException("ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ") - response = self.the_input.handle_ajax( - "preview_chemcalc", - {'formula': 'H2O + invalid chemistry'} - ) + response = self.the_input.handle_ajax("preview_chemcalc", {"formula": "H2O + invalid chemistry"}) - assert 'error' in response - assert "Couldn't parse formula" in response['error'] + assert "error" in response + assert "Couldn't parse formula" in response["error"] - @patch('xmodule.capa.inputtypes.log') + @patch("xmodule.capa.inputtypes.log") def test_ajax_other_err(self, mock_log): """ With other errors, test that ChemicalEquationInput also logs it """ - with patch('xmodule.capa.inputtypes.chemcalc.render_to_html') as mock_render: + with patch("xmodule.capa.inputtypes.chemcalc.render_to_html") as mock_render: mock_render.side_effect = Exception() - response = self.the_input.handle_ajax( - "preview_chemcalc", - {'formula': 'H2O + superterrible chemistry'} - ) - mock_log.warning.assert_called_once_with( - "Error while previewing chemical formula", exc_info=True - ) - assert 'error' in response - assert response['error'] == 'Error while rendering preview' + response = self.the_input.handle_ajax("preview_chemcalc", {"formula": "H2O + superterrible chemistry"}) + mock_log.warning.assert_called_once_with("Error while previewing chemical formula", exc_info=True) + assert "error" in response + assert response["error"] == "Error while rendering preview" class FormulaEquationTest(unittest.TestCase): """ Check that formula equation inputs work. 
""" + def setUp(self): super(FormulaEquationTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.size = "42" @@ -1251,30 +1254,27 @@ def setUp(self): element = etree.fromstring(xml_str) - state = { - 'value': 'x^2+1/2', - 'response_data': RESPONSE_DATA - } - self.the_input = lookup_tag('formulaequationinput')(mock_capa_system(), element, state) + state = {"value": "x^2+1/2", "response_data": RESPONSE_DATA} + self.the_input = lookup_tag("formulaequationinput")(mock_capa_system(), element, state) def test_rendering(self): """ Verify that the render context matches the expected render context """ context = self.the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'x^2+1/2', - 'status': inputtypes.Status('unanswered'), - 'msg': '', - 'size': self.size, - 'previewer': '/dummy-static/js/capa/src/formula_equation_preview.js', - 'inline': False, - 'trailing_text': '', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "x^2+1/2", + "status": inputtypes.Status("unanswered"), + "msg": "", + "size": self.size, + "previewer": "/dummy-static/js/capa/src/formula_equation_preview.js", + "inline": False, + "trailing_text": "", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } assert context == expected @@ -1286,41 +1286,40 @@ def test_trailing_text_rendering(self): # store (xml_text, expected) trailing_text = [] # standard trailing text - trailing_text.append(('m/s', 'm/s')) + trailing_text.append(("m/s", "m/s")) # unicode trailing text - trailing_text.append(('\xc3', '\xc3')) + trailing_text.append(("\xc3", "\xc3")) # html escaped trailing text # this is the only one we expect to change - trailing_text.append(('a < b', 'a < b')) + trailing_text.append(("a < b", "a < b")) for xml_text, expected_text in trailing_text: xml_str = """""".format(size=size, tt=xml_text) + />""".format( + size=size, tt=xml_text + ) element = etree.fromstring(xml_str) - state = { - 'value': 'x^2+1/2', - 'response_data': RESPONSE_DATA - } - the_input = lookup_tag('formulaequationinput')(mock_capa_system(), element, state) + state = {"value": "x^2+1/2", "response_data": RESPONSE_DATA} + the_input = lookup_tag("formulaequationinput")(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'prob_1_2' + prob_id = "prob_1_2" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': 'x^2+1/2', - 'status': inputtypes.Status('unanswered'), - 'msg': '', - 'size': size, - 'previewer': '/dummy-static/js/capa/src/formula_equation_preview.js', - 'inline': False, - 'trailing_text': expected_text, - 'response_data': RESPONSE_DATA, - 'describedby_html': TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": "x^2+1/2", + "status": inputtypes.Status("unanswered"), + "msg": "", + "size": size, + "previewer": "/dummy-static/js/capa/src/formula_equation_preview.js", + "inline": False, + "trailing_text": expected_text, + "response_data": RESPONSE_DATA, + "describedby_html": TRAILING_TEXT_DESCRIBEDBY.format(trailing_text_id=prob_id, status_id=prob_id), } assert context == expected @@ -1329,13 +1328,13 @@ def test_formcalc_ajax_sucess(self): """ Verify that using the correct 
dispatch and valid data produces a valid response """ - data = {'formula': "x^2+1/2", 'request_start': 0} + data = {"formula": "x^2+1/2", "request_start": 0} response = self.the_input.handle_ajax("preview_formcalc", data) - assert 'preview' in response - assert response['preview'] != '' - assert response['error'] == '' - assert response['request_start'] == data['request_start'] + assert "preview" in response + assert response["preview"] != "" + assert response["error"] == "" + assert response["request_start"] == data["request_start"] def test_ajax_bad_method(self): """ @@ -1350,50 +1349,57 @@ def test_ajax_no_formula(self): """ response = self.the_input.handle_ajax( "preview_formcalc", - {'request_start': 1, } + { + "request_start": 1, + }, ) - assert 'error' in response - assert response['error'] == 'No formula specified.' + assert "error" in response + assert response["error"] == "No formula specified." def test_ajax_parse_err(self): """ With parse errors, FormulaEquationInput should give an error message """ # Simulate answering a problem that raises the exception - with patch('xmodule.capa.inputtypes.latex_preview') as mock_preview: + with patch("xmodule.capa.inputtypes.latex_preview") as mock_preview: mock_preview.side_effect = ParseException("Oopsie") response = self.the_input.handle_ajax( "preview_formcalc", - {'formula': 'x^2+1/2', 'request_start': 1, } + { + "formula": "x^2+1/2", + "request_start": 1, + }, ) - assert 'error' in response - assert response['error'] == "Sorry, couldn't parse formula" + assert "error" in response + assert response["error"] == "Sorry, couldn't parse formula" - @patch('xmodule.capa.inputtypes.log') + @patch("xmodule.capa.inputtypes.log") def test_ajax_other_err(self, mock_log): """ With other errors, test that FormulaEquationInput also logs it """ - with patch('xmodule.capa.inputtypes.latex_preview') as mock_preview: + with patch("xmodule.capa.inputtypes.latex_preview") as mock_preview: mock_preview.side_effect = Exception() response = self.the_input.handle_ajax( "preview_formcalc", - {'formula': 'x^2+1/2', 'request_start': 1, } + { + "formula": "x^2+1/2", + "request_start": 1, + }, ) - mock_log.warning.assert_called_once_with( - "Error while previewing formula", exc_info=True - ) - assert 'error' in response - assert response['error'] == 'Error while rendering preview' + mock_log.warning.assert_called_once_with("Error while previewing formula", exc_info=True) + assert "error" in response + assert response["error"] == "Error while rendering preview" class DragAndDropTest(unittest.TestCase): """ Check that drag and drop inputs work """ + def test_rendering(self): - path_to_images = '/dummy-static/images/' + path_to_images = "/dummy-static/images/" xml_str = """ @@ -1410,55 +1416,84 @@ def test_rendering(self): - """.format(path=path_to_images) + """.format( + path=path_to_images + ) element = etree.fromstring(xml_str) - value = 'abc' - state = { - 'value': value, - 'status': 'unsubmitted', - 'response_data': RESPONSE_DATA - } + value = "abc" + state = {"value": value, "status": "unsubmitted", "response_data": RESPONSE_DATA} user_input = { # order matters, for string comparison - "target_outline": "false", - "base_image": "/dummy-static/images/about_1.png", - "draggables": [ - {"can_reuse": "", "label": "Label 1", "id": "1", "icon": "", "target_fields": []}, - {"can_reuse": "", "label": "cc", "id": "name_with_icon", "icon": "/dummy-static/images/cc.jpg", "target_fields": []}, # lint-amnesty, pylint: disable=line-too-long - {"can_reuse": "", "label": 
"arrow-left", "id": "with_icon", "icon": "/dummy-static/images/arrow-left.png", "target_fields": []}, # lint-amnesty, pylint: disable=line-too-long - {"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "target_fields": []}, - {"can_reuse": "", "label": "Mute", "id": "2", "icon": "/dummy-static/images/mute.png", "target_fields": []}, # lint-amnesty, pylint: disable=line-too-long - {"can_reuse": "", "label": "spinner", "id": "name_label_icon3", "icon": "/dummy-static/images/spinner.gif", "target_fields": []}, # lint-amnesty, pylint: disable=line-too-long - {"can_reuse": "", "label": "Star", "id": "name4", "icon": "/dummy-static/images/volume.png", "target_fields": []}, # lint-amnesty, pylint: disable=line-too-long - {"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "target_fields": []}], - "one_per_target": "True", - "targets": [ - {"y": "90", "x": "210", "id": "t1", "w": "90", "h": "90"}, - {"y": "160", "x": "370", "id": "t2", "w": "90", "h": "90"} - ] + "target_outline": "false", + "base_image": "/dummy-static/images/about_1.png", + "draggables": [ + {"can_reuse": "", "label": "Label 1", "id": "1", "icon": "", "target_fields": []}, + { + "can_reuse": "", + "label": "cc", + "id": "name_with_icon", + "icon": "/dummy-static/images/cc.jpg", + "target_fields": [], + }, # lint-amnesty, pylint: disable=line-too-long + { + "can_reuse": "", + "label": "arrow-left", + "id": "with_icon", + "icon": "/dummy-static/images/arrow-left.png", + "target_fields": [], + }, # lint-amnesty, pylint: disable=line-too-long + {"can_reuse": "", "label": "Label2", "id": "5", "icon": "", "target_fields": []}, + { + "can_reuse": "", + "label": "Mute", + "id": "2", + "icon": "/dummy-static/images/mute.png", + "target_fields": [], + }, # lint-amnesty, pylint: disable=line-too-long + { + "can_reuse": "", + "label": "spinner", + "id": "name_label_icon3", + "icon": "/dummy-static/images/spinner.gif", + "target_fields": [], + }, # lint-amnesty, pylint: disable=line-too-long + { + "can_reuse": "", + "label": "Star", + "id": "name4", + "icon": "/dummy-static/images/volume.png", + "target_fields": [], + }, # lint-amnesty, pylint: disable=line-too-long + {"can_reuse": "", "label": "Label3", "id": "7", "icon": "", "target_fields": []}, + ], + "one_per_target": "True", + "targets": [ + {"y": "90", "x": "210", "id": "t1", "w": "90", "h": "90"}, + {"y": "160", "x": "370", "id": "t2", "w": "90", "h": "90"}, + ], } - the_input = lookup_tag('drag_and_drop_input')(mock_capa_system(), element, state) - prob_id = 'prob_1_2' + the_input = lookup_tag("drag_and_drop_input")(mock_capa_system(), element, state) + prob_id = "prob_1_2" context = the_input._get_render_context() # pylint: disable=protected-access expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'value': value, - 'status': inputtypes.Status('unsubmitted'), - 'msg': '', - 'drag_and_drop_json': json.dumps(user_input), - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "value": value, + "status": inputtypes.Status("unsubmitted"), + "msg": "", + "drag_and_drop_json": json.dumps(user_input), + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } # as we are dumping 'draggables' dicts while dumping user_input, string # comparison will fail, as order of keys is random. 
- assert json.loads(context['drag_and_drop_json']) == user_input - context.pop('drag_and_drop_json') - expected.pop('drag_and_drop_json') + assert json.loads(context["drag_and_drop_json"]) == user_input + context.pop("drag_and_drop_json") + expected.pop("drag_and_drop_json") assert context == expected @@ -1466,8 +1501,9 @@ class AnnotationInputTest(unittest.TestCase): """ Make sure option inputs work """ + def test_rendering(self): - xml_str = ''' + xml_str = """ foo bar @@ -1480,47 +1516,42 @@ def test_rendering(self): -''' +""" element = etree.fromstring(xml_str) value = {"comment": "blah blah", "options": [1]} json_value = json.dumps(value) - state = { - 'value': json_value, - 'id': 'annotation_input', - 'status': 'answered', - 'response_data': RESPONSE_DATA - } + state = {"value": json_value, "id": "annotation_input", "status": "answered", "response_data": RESPONSE_DATA} - tag = 'annotationinput' + tag = "annotationinput" the_input = lookup_tag(tag)(mock_capa_system(), element, state) context = the_input._get_render_context() # pylint: disable=protected-access - prob_id = 'annotation_input' + prob_id = "annotation_input" expected = { - 'STATIC_URL': '/dummy-static/', - 'id': prob_id, - 'status': inputtypes.Status('answered'), - 'msg': '', - 'title': 'foo', - 'text': 'bar', - 'comment': 'my comment', - 'comment_prompt': 'type a commentary', - 'tag_prompt': 'select a tag', - 'options': [ - {'id': 0, 'description': 'x', 'choice': 'correct'}, - {'id': 1, 'description': 'y', 'choice': 'incorrect'}, - {'id': 2, 'description': 'z', 'choice': 'partially-correct'} + "STATIC_URL": "/dummy-static/", + "id": prob_id, + "status": inputtypes.Status("answered"), + "msg": "", + "title": "foo", + "text": "bar", + "comment": "my comment", + "comment_prompt": "type a commentary", + "tag_prompt": "select a tag", + "options": [ + {"id": 0, "description": "x", "choice": "correct"}, + {"id": 1, "description": "y", "choice": "incorrect"}, + {"id": 2, "description": "z", "choice": "partially-correct"}, ], - 'value': json_value, - 'options_value': value['options'], - 'has_options_value': len(value['options']) > 0, - 'comment_value': value['comment'], - 'debug': False, - 'return_to_annotation': True, - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "value": json_value, + "options_value": value["options"], + "has_options_value": len(value["options"]) > 0, + "comment_value": value["comment"], + "debug": False, + "return_to_annotation": True, + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } self.maxDiff = None @@ -1531,6 +1562,7 @@ class TestChoiceText(unittest.TestCase): """ Tests for checkboxtextgroup inputs """ + @staticmethod def build_choice_element(node_type, contents, tail_text, value): """ @@ -1538,9 +1570,9 @@ def build_choice_element(node_type, contents, tail_text, value): """ # When xml is being parsed numtolerance_input and decoy_input tags map to textinput type # in order to provide the template with correct rendering information. 
- if node_type in ('numtolerance_input', 'decoy_input'): - node_type = 'textinput' - choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value} + if node_type in ("numtolerance_input", "decoy_input"): + node_type = "textinput" + choice = {"type": node_type, "contents": contents, "tail_text": tail_text, "value": value} return choice def check_group(self, tag, choice_tag, expected_input_type): @@ -1557,35 +1589,32 @@ def check_group(self, tag, choice_tag, expected_input_type): <{choice_tag} correct="false" name="choiceinput_0">this isfalse Is a number! - """.format(tag=tag, choice_tag=choice_tag) + """.format( + tag=tag, choice_tag=choice_tag + ) element = etree.fromstring(xml_str) - prob_id = 'choicetext_input' - state = { - 'value': '{}', - 'id': prob_id, - 'status': inputtypes.Status('answered'), - 'response_data': RESPONSE_DATA - } + prob_id = "choicetext_input" + state = {"value": "{}", "id": prob_id, "status": inputtypes.Status("answered"), "response_data": RESPONSE_DATA} - first_input = self.build_choice_element('numtolerance_input', 'choiceinput_0_textinput_0', 'false', '') - second_input = self.build_choice_element('decoy_input', 'choiceinput_1_textinput_0', '', '') - first_choice_content = self.build_choice_element('text', 'this is', '', '') - second_choice_content = self.build_choice_element('text', 'Is a number', '', '') - second_choice_text = self.build_choice_element('text', "!", '', '') + first_input = self.build_choice_element("numtolerance_input", "choiceinput_0_textinput_0", "false", "") + second_input = self.build_choice_element("decoy_input", "choiceinput_1_textinput_0", "", "") + first_choice_content = self.build_choice_element("text", "this is", "", "") + second_choice_content = self.build_choice_element("text", "Is a number", "", "") + second_choice_text = self.build_choice_element("text", "!", "", "") choices = [ - ('choiceinput_0', [first_choice_content, first_input]), - ('choiceinput_1', [second_choice_content, second_input, second_choice_text]) + ("choiceinput_0", [first_choice_content, first_input]), + ("choiceinput_1", [second_choice_content, second_input, second_choice_text]), ] expected = { - 'STATIC_URL': '/dummy-static/', - 'msg': '', - 'input_type': expected_input_type, - 'choices': choices, - 'show_correctness': 'always', - 'submitted_message': 'Answer received.', - 'response_data': RESPONSE_DATA, - 'describedby_html': DESCRIBEDBY.format(status_id=prob_id) + "STATIC_URL": "/dummy-static/", + "msg": "", + "input_type": expected_input_type, + "choices": choices, + "show_correctness": "always", + "submitted_message": "Answer received.", + "response_data": RESPONSE_DATA, + "describedby_html": DESCRIBEDBY.format(status_id=prob_id), } expected.update(state) the_input = lookup_tag(tag)(mock_capa_system(), element, state) @@ -1597,21 +1626,21 @@ def test_radiotextgroup(self): Test that a properly formatted radiotextgroup problem generates expected ouputs """ - self.check_group('radiotextgroup', 'choice', 'radio') + self.check_group("radiotextgroup", "choice", "radio") def test_checkboxtextgroup(self): """ Test that a properly formatted checkboxtextgroup problem generates expected ouput """ - self.check_group('checkboxtextgroup', 'choice', 'checkbox') + self.check_group("checkboxtextgroup", "choice", "checkbox") def test_invalid_tag(self): """ Test to ensure that an unrecognized inputtype tag causes an error """ with pytest.raises(Exception): - self.check_group('invalid', 'choice', 'checkbox') + self.check_group("invalid", "choice", 
"checkbox") def test_invalid_input_tag(self): """ @@ -1619,30 +1648,31 @@ def test_invalid_input_tag(self): a checkbox or radiotextgroup problem raises an error. """ with self.assertRaisesRegex(Exception, "Error in xml"): - self.check_group('checkboxtextgroup', 'invalid', 'checkbox') + self.check_group("checkboxtextgroup", "invalid", "checkbox") class TestStatus(unittest.TestCase): """ Tests for Status class """ + def test_str(self): """ Test stringifing Status objects """ - statobj = inputtypes.Status('test') - assert str(statobj) == 'test' + statobj = inputtypes.Status("test") + assert str(statobj) == "test" def test_classes(self): """ Test that css classnames are correct """ css_classes = [ - ('unsubmitted', 'unanswered'), - ('incomplete', 'incorrect'), - ('queued', 'processing'), - ('correct', 'correct'), - ('test', 'test'), + ("unsubmitted", "unanswered"), + ("incomplete", "incorrect"), + ("queued", "processing"), + ("correct", "correct"), + ("test", "test"), ] for status, classname in css_classes: statobj = inputtypes.Status(status) @@ -1653,13 +1683,13 @@ def test_display_names(self): Test that display names are correct """ names = [ - ('correct', 'correct'), - ('incorrect', 'incorrect'), - ('incomplete', 'incomplete'), - ('unanswered', 'unanswered'), - ('unsubmitted', 'unanswered'), - ('queued', 'processing'), - ('dave', 'dave'), + ("correct", "correct"), + ("incorrect", "incorrect"), + ("incomplete", "incomplete"), + ("unanswered", "unanswered"), + ("unsubmitted", "unanswered"), + ("queued", "processing"), + ("dave", "dave"), ] for status, display_name in names: statobj = inputtypes.Status(status) @@ -1671,11 +1701,11 @@ def test_translated_names(self): """ func = lambda t: t.upper() # status is in the mapping - statobj = inputtypes.Status('queued', func) - assert statobj.display_name == 'PROCESSING' + statobj = inputtypes.Status("queued", func) + assert statobj.display_name == "PROCESSING" # status is not in the mapping - statobj = inputtypes.Status('test', func) - assert statobj.display_name == 'test' - assert str(statobj) == 'test' - assert statobj.classname == 'test' + statobj = inputtypes.Status("test", func) + assert statobj.display_name == "test" + assert str(statobj) == "test" + assert statobj.classname == "test" diff --git a/xmodule/capa/tests/test_responsetypes.py b/xmodule/capa/tests/test_responsetypes.py index ca9f5eba59a6..711d6c8e3e70 100644 --- a/xmodule/capa/tests/test_responsetypes.py +++ b/xmodule/capa/tests/test_responsetypes.py @@ -11,16 +11,16 @@ from datetime import datetime from unittest import mock -import pytest import calc import pyparsing +import pytest import random2 as random import requests from pytz import UTC from xmodule.capa.correctmap import CorrectMap from xmodule.capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError -from xmodule.capa.tests.helpers import load_fixture, new_loncapa_problem, mock_capa_system +from xmodule.capa.tests.helpers import load_fixture, mock_capa_system, new_loncapa_problem from xmodule.capa.tests.response_xml_factory import ( AnnotationResponseXMLFactory, ChoiceResponseXMLFactory, @@ -35,7 +35,7 @@ SchematicResponseXMLFactory, StringResponseXMLFactory, SymbolicResponseXMLFactory, - TrueFalseResponseXMLFactory + TrueFalseResponseXMLFactory, ) from xmodule.capa.tests.test_util import use_unsafe_codejail from xmodule.capa.util import convert_files_to_filenames @@ -61,26 +61,26 @@ def build_problem(self, capa_system=None, **kwargs): # pylint: disable=missing-function-docstring def assert_grade(self, 
problem, submission, expected_correctness, msg=None): - input_dict = {'1_2_1': submission} + input_dict = {"1_2_1": submission} correct_map = problem.grade_answers(input_dict) if msg is None: - assert correct_map.get_correctness('1_2_1') == expected_correctness + assert correct_map.get_correctness("1_2_1") == expected_correctness else: - assert correct_map.get_correctness('1_2_1') == expected_correctness, msg + assert correct_map.get_correctness("1_2_1") == expected_correctness, msg def assert_answer_format(self, problem): answers = problem.get_question_answers() - assert answers['1_2_1'] is not None + assert answers["1_2_1"] is not None # pylint: disable=missing-function-docstring def assert_multiple_grade(self, problem, correct_answers, incorrect_answers): for input_str in correct_answers: - result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') - assert result == 'correct' + result = problem.grade_answers({"1_2_1": input_str}).get_correctness("1_2_1") + assert result == "correct" for input_str in incorrect_answers: - result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') - assert result == 'incorrect' + result = problem.grade_answers({"1_2_1": input_str}).get_correctness("1_2_1") + assert result == "incorrect" def assert_multiple_partial(self, problem, correct_answers, incorrect_answers, partial_answers): """ @@ -88,16 +88,16 @@ def assert_multiple_partial(self, problem, correct_answers, incorrect_answers, p and partially correct answers, all passed as lists. """ for input_str in correct_answers: - result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') - assert result == 'correct' + result = problem.grade_answers({"1_2_1": input_str}).get_correctness("1_2_1") + assert result == "correct" for input_str in incorrect_answers: - result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') - assert result == 'incorrect' + result = problem.grade_answers({"1_2_1": input_str}).get_correctness("1_2_1") + assert result == "incorrect" for input_str in partial_answers: - result = problem.grade_answers({'1_2_1': input_str}).get_correctness('1_2_1') - assert result == 'partially-correct' + result = problem.grade_answers({"1_2_1": input_str}).get_correctness("1_2_1") + assert result == "partially-correct" def _get_random_number_code(self): """Returns code to be used to generate a random result.""" @@ -117,61 +117,59 @@ def test_multiple_choice_grade(self): problem = self.build_problem(choices=[False, True, False]) # Ensure that we get the expected grades - self.assert_grade(problem, 'choice_0', 'incorrect') - self.assert_grade(problem, 'choice_1', 'correct') - self.assert_grade(problem, 'choice_2', 'incorrect') + self.assert_grade(problem, "choice_0", "incorrect") + self.assert_grade(problem, "choice_1", "correct") + self.assert_grade(problem, "choice_2", "incorrect") def test_partial_multiple_choice_grade(self): - problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points') + problem = self.build_problem(choices=[False, True, "partial"], credit_type="points") # Ensure that we get the expected grades - self.assert_grade(problem, 'choice_0', 'incorrect') - self.assert_grade(problem, 'choice_1', 'correct') - self.assert_grade(problem, 'choice_2', 'partially-correct') + self.assert_grade(problem, "choice_0", "incorrect") + self.assert_grade(problem, "choice_1", "correct") + self.assert_grade(problem, "choice_2", "partially-correct") def test_named_multiple_choice_grade(self): - problem = 
self.build_problem(choices=[False, True, False],
-                                     choice_names=["foil_1", "foil_2", "foil_3"])
+        problem = self.build_problem(choices=[False, True, False], choice_names=["foil_1", "foil_2", "foil_3"])
 
         # Ensure that we get the expected grades
-        self.assert_grade(problem, 'choice_foil_1', 'incorrect')
-        self.assert_grade(problem, 'choice_foil_2', 'correct')
-        self.assert_grade(problem, 'choice_foil_3', 'incorrect')
+        self.assert_grade(problem, "choice_foil_1", "incorrect")
+        self.assert_grade(problem, "choice_foil_2", "correct")
+        self.assert_grade(problem, "choice_foil_3", "incorrect")
 
     def test_multiple_choice_valid_grading_schemes(self):
         # Multiple Choice problems only allow one partial credit scheme.
         # Change this test if that changes.
-        problem = self.build_problem(choices=[False, True, 'partial'], credit_type='points,points')
+        problem = self.build_problem(choices=[False, True, "partial"], credit_type="points,points")
         with pytest.raises(LoncapaProblemError):
-            input_dict = {'1_2_1': 'choice_1'}
+            input_dict = {"1_2_1": "choice_1"}
             problem.grade_answers(input_dict)
 
         # 'bongo' is not a valid grading scheme.
-        problem = self.build_problem(choices=[False, True, 'partial'], credit_type='bongo')
+        problem = self.build_problem(choices=[False, True, "partial"], credit_type="bongo")
         with pytest.raises(LoncapaProblemError):
-            input_dict = {'1_2_1': 'choice_1'}
+            input_dict = {"1_2_1": "choice_1"}
             problem.grade_answers(input_dict)
 
     def test_partial_points_multiple_choice_grade(self):
         problem = self.build_problem(
-            choices=['partial', 'partial', 'partial'],
-            credit_type='points',
-            points=['1', '0.6', '0']
+            choices=["partial", "partial", "partial"], credit_type="points", points=["1", "0.6", "0"]
         )
 
         # Ensure that we get the expected number of points
         # Using assertAlmostEqual to avoid floating point issues
-        correct_map = problem.grade_answers({'1_2_1': 'choice_0'})
-        assert round(correct_map.get_npoints('1_2_1') - 1, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_0"})
+        assert round(correct_map.get_npoints("1_2_1") - 1, 7) >= 0
 
-        correct_map = problem.grade_answers({'1_2_1': 'choice_1'})
-        assert round(correct_map.get_npoints('1_2_1') - 0.6, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_1"})
+        assert round(correct_map.get_npoints("1_2_1") - 0.6, 7) >= 0
 
-        correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
-        assert round(correct_map.get_npoints('1_2_1') - 0, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_2"})
+        assert round(correct_map.get_npoints("1_2_1") - 0, 7) >= 0
 
     def test_contextualized_choices(self):
-        script = textwrap.dedent("""
+        script = textwrap.dedent(
+            """
             a = 2
             b = 9
             c = a + b
@@ -184,23 +182,19 @@ def test_contextualized_choices(self):
             ok2 = "partial"
             text2 = "infinity may be both"
-            """)
+            """
+        )
         choices = ["$ok0", "$ok1", "$ok2"]
-        choice_names = ["$text0 ... (should be $ok0)",
-                        "$text1 ... (should be $ok1)",
-                        "$text2 ... (should be $ok2)"]
-        problem = self.build_problem(script=script,
-                                     choices=choices,
-                                     choice_names=choice_names,
-                                     credit_type='points')
+        choice_names = ["$text0 ... (should be $ok0)", "$text1 ... (should be $ok1)", "$text2 ... (should be $ok2)"]
+        problem = self.build_problem(script=script, choices=choices, choice_names=choice_names, credit_type="points")
 
         # Ensure the expected correctness and choice names
-        self.assert_grade(problem, 'choice_2 + 9 is even ... (should be False)', 'incorrect')
-        self.assert_grade(problem, 'choice_2 + 9 is odd ... (should be True)', 'correct')
-        self.assert_grade(problem, 'choice_infinity may be both ... (should be partial)', 'partially-correct')
+        self.assert_grade(problem, "choice_2 + 9 is even ... (should be False)", "incorrect")
+        self.assert_grade(problem, "choice_2 + 9 is odd ... (should be True)", "correct")
+        self.assert_grade(problem, "choice_infinity may be both ... (should be partial)", "partially-correct")
 
 
-class TrueFalseResponseTest(ResponseTest):    # pylint: disable=missing-class-docstring
+class TrueFalseResponseTest(ResponseTest):  # pylint: disable=missing-class-docstring
     xml_factory_class = TrueFalseResponseXMLFactory
 
     def test_true_false_grade(self):
@@ -208,40 +202,39 @@ def test_true_false_grade(self):
         # Check the results
         # Mark correct if and only if ALL (and only) correct choices selected
-        self.assert_grade(problem, 'choice_0', 'incorrect')
-        self.assert_grade(problem, 'choice_1', 'incorrect')
-        self.assert_grade(problem, 'choice_2', 'incorrect')
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2'], 'incorrect')
-        self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect')
-        self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
-        self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
+        self.assert_grade(problem, "choice_0", "incorrect")
+        self.assert_grade(problem, "choice_1", "incorrect")
+        self.assert_grade(problem, "choice_2", "incorrect")
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_2"], "incorrect")
+        self.assert_grade(problem, ["choice_0", "choice_2"], "incorrect")
+        self.assert_grade(problem, ["choice_0", "choice_1"], "incorrect")
+        self.assert_grade(problem, ["choice_1", "choice_2"], "correct")
 
         # Invalid choices should be marked incorrect (we have no choice 3)
-        self.assert_grade(problem, 'choice_3', 'incorrect')
-        self.assert_grade(problem, 'not_a_choice', 'incorrect')
+        self.assert_grade(problem, "choice_3", "incorrect")
+        self.assert_grade(problem, "not_a_choice", "incorrect")
 
     def test_named_true_false_grade(self):
-        problem = self.build_problem(choices=[False, True, True],
-                                     choice_names=['foil_1', 'foil_2', 'foil_3'])
+        problem = self.build_problem(choices=[False, True, True], choice_names=["foil_1", "foil_2", "foil_3"])
 
         # Check the results
         # Mark correct if and only if ALL (and only) correct chocies selected
-        self.assert_grade(problem, 'choice_foil_1', 'incorrect')
-        self.assert_grade(problem, 'choice_foil_2', 'incorrect')
-        self.assert_grade(problem, 'choice_foil_3', 'incorrect')
-        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2', 'choice_foil_3'], 'incorrect')
-        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_3'], 'incorrect')
-        self.assert_grade(problem, ['choice_foil_1', 'choice_foil_2'], 'incorrect')
-        self.assert_grade(problem, ['choice_foil_2', 'choice_foil_3'], 'correct')
+        self.assert_grade(problem, "choice_foil_1", "incorrect")
+        self.assert_grade(problem, "choice_foil_2", "incorrect")
+        self.assert_grade(problem, "choice_foil_3", "incorrect")
+        self.assert_grade(problem, ["choice_foil_1", "choice_foil_2", "choice_foil_3"], "incorrect")
+        self.assert_grade(problem, ["choice_foil_1", "choice_foil_3"], "incorrect")
+        self.assert_grade(problem, ["choice_foil_1", "choice_foil_2"], "incorrect")
+        self.assert_grade(problem, ["choice_foil_2", "choice_foil_3"], "correct")
 
         # Invalid choices should be marked incorrect
-        self.assert_grade(problem, 'choice_foil_4', 'incorrect')
-        self.assert_grade(problem, 'not_a_choice', 'incorrect')
+        self.assert_grade(problem, "choice_foil_4", "incorrect")
+        self.assert_grade(problem, "not_a_choice", "incorrect")
 
     def test_single_correct_response(self):
         problem = self.build_problem(choices=[True, False])
-        self.assert_grade(problem, 'choice_0', 'correct')
-        self.assert_grade(problem, ['choice_0'], 'correct')
+        self.assert_grade(problem, "choice_0", "correct")
+        self.assert_grade(problem, ["choice_0"], "correct")
 
 
 class ImageResponseTest(ResponseTest):  # pylint: disable=missing-class-docstring
@@ -253,8 +246,7 @@ def test_rectangle_grade(self):
         # Anything inside the rectangle (and along the borders) is correct
         # Everything else is incorrect
-        correct_inputs = ["[12,19]", "[10,10]", "[20,20]",
-                          "[10,15]", "[20,15]", "[15,10]", "[15,20]"]
+        correct_inputs = ["[12,19]", "[10,10]", "[20,20]", "[10,15]", "[20,15]", "[15,10]", "[15,20]"]
         incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]"]
         self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
@@ -265,8 +257,7 @@ def test_multiple_rectangles_grade(self):
         # Expect that only points inside the rectangles are marked correct
         problem = self.build_problem(rectangle=rectangle_str)
         correct_inputs = ["[12,19]", "[120, 130]"]
-        incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]",
-                            "[50,55]", "[300, 14]", "[120, 400]"]
+        incorrect_inputs = ["[4,6]", "[25,15]", "[15,40]", "[15,4]", "[50,55]", "[300, 14]", "[120, 400]"]
         self.assert_multiple_grade(problem, correct_inputs, incorrect_inputs)
 
     def test_region_grade(self):
@@ -315,33 +306,44 @@ def test_grade_single_input_incorrect(self):
         # Incorrect answers
         incorrect_inputs = [
-            ('0', ''),
-            ('4x+3y', textwrap.dedent("""
+            ("0", ""),
+            (
+                "4x+3y",
+                textwrap.dedent(
+                    """
                 4*x+3*y
-            """)),
+                    """
+                ),
+            ),
         ]
 
-        for (input_str, input_mathml) in incorrect_inputs:
-            self._assert_symbolic_grade(problem, input_str, input_mathml, 'incorrect')
+        for input_str, input_mathml in incorrect_inputs:
+            self._assert_symbolic_grade(problem, input_str, input_mathml, "incorrect")
 
     def test_complex_number_grade_incorrect(self):
-        problem = self.build_problem(math_display=True,
-                                     expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
-                                     options=["matrix", "imaginary"])
+        problem = self.build_problem(
+            math_display=True,
+            expect="[[cos(theta),i*sin(theta)],[i*sin(theta),cos(theta)]]",
+            options=["matrix", "imaginary"],
+        )
 
-        wrong_snuggletex = load_fixture('snuggletex_wrong.html')
-        dynamath_input = textwrap.dedent("""
+        wrong_snuggletex = load_fixture("snuggletex_wrong.html")
+        dynamath_input = textwrap.dedent(
+            """
             2
-            """)
+            """
+        )
 
         self._assert_symbolic_grade(
-            problem, "2", dynamath_input,
-            'incorrect',
+            problem,
+            "2",
+            dynamath_input,
+            "incorrect",
             snuggletex_resp=wrong_snuggletex,
         )
@@ -352,10 +354,7 @@ def test_multiple_inputs_exception(self):
         with pytest.raises(Exception):
             self.build_problem(math_display=True, expect="2*x+3*y", num_inputs=3)
 
-    def _assert_symbolic_grade(
-        self, problem, student_input, dynamath_input, expected_correctness,
-        snuggletex_resp=""
-    ):
+    def _assert_symbolic_grade(self, problem, student_input, dynamath_input, expected_correctness, snuggletex_resp=""):
         """
         Assert that the symbolic response has a certain grade.
@@ -365,16 +364,15 @@ def _assert_symbolic_grade(
         `expected_correctness` is either "correct" or "incorrect"
         `snuggletex_resp` is the simulated response from the Snuggletex server
         """
-        input_dict = {'1_2_1': str(student_input),
-                      '1_2_1_dynamath': str(dynamath_input)}
+        input_dict = {"1_2_1": str(student_input), "1_2_1_dynamath": str(dynamath_input)}
 
         # Simulate what the Snuggletex server would respond
-        with mock.patch.object(requests, 'post') as mock_post:
+        with mock.patch.object(requests, "post") as mock_post:
             mock_post.return_value.text = snuggletex_resp
 
             correct_map = problem.grade_answers(input_dict)
 
-            assert correct_map.get_correctness('1_2_1') == expected_correctness
+            assert correct_map.get_correctness("1_2_1") == expected_correctness
 
 
 @use_unsafe_codejail()
@@ -382,8 +380,7 @@ class OptionResponseTest(ResponseTest):  # pylint: disable=missing-class-docstri
     xml_factory_class = OptionResponseXMLFactory
 
     def test_grade(self):
-        problem = self.build_problem(options=["first", "second", "third"],
-                                     correct_option="second")
+        problem = self.build_problem(options=["first", "second", "third"], correct_option="second")
 
         # Assert that we get the expected grades
         self.assert_grade(problem, "first", "incorrect")
@@ -395,34 +392,31 @@ def test_grade(self):
     def test_quote_option(self):
         # Test that option response properly escapes quotes inside options strings
-        problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"],
-                                     correct_option="hasn't")
+        problem = self.build_problem(options=["hasnot", "hasn't", "has'nt"], correct_option="hasn't")
 
         # Assert that correct option with a quote inside is marked correctly
         self.assert_grade(problem, "hasnot", "incorrect")
         self.assert_grade(problem, "hasn't", "correct")
-        self.assert_grade(problem, "hasn\'t", "correct")
+        self.assert_grade(problem, "hasn't", "correct")
         self.assert_grade(problem, "has'nt", "incorrect")
 
     def test_variable_options(self):
         """
         Test that if variable are given in option response then correct map must contain answervariable value.
""" - script = textwrap.dedent("""\ + script = textwrap.dedent( + """\ a = 1000 b = a*2 c = a*3 - """) - problem = self.build_problem( - options=['$a', '$b', '$c'], - correct_option='$a', - script=script + """ ) + problem = self.build_problem(options=["$a", "$b", "$c"], correct_option="$a", script=script) - input_dict = {'1_2_1': '1000'} + input_dict = {"1_2_1": "1000"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_correctness('1_2_1') == 'correct' - assert correct_map.get_property('1_2_1', 'answervariable') == '$a' + assert correct_map.get_correctness("1_2_1") == "correct" + assert correct_map.get_property("1_2_1", "answervariable") == "$a" @use_unsafe_codejail() @@ -430,6 +424,7 @@ class FormulaResponseTest(ResponseTest): """ Test the FormulaResponse class """ + xml_factory_class = FormulaResponseXMLFactory def test_grade(self): @@ -439,13 +434,10 @@ def test_grade(self): Specifically, if it can understand equivalence of formulae """ # Sample variables x and y in the range [-10, 10] - sample_dict = {'x': (-10, 10), 'y': (-10, 10)} + sample_dict = {"x": (-10, 10), "y": (-10, 10)} # The expected solution is numerically equivalent to x+2y - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance=0.01, - answer="x+2*y") + problem = self.build_problem(sample_dict=sample_dict, num_samples=10, tolerance=0.01, answer="x+2*y") # Expect an equivalent formula to be marked correct # 2x - x + y + y = x + 2y @@ -462,29 +454,29 @@ def test_hint(self): Test the hint-giving functionality of FormulaResponse """ # Sample variables x and y in the range [-10, 10] - sample_dict = {'x': (-10, 10), 'y': (-10, 10)} + sample_dict = {"x": (-10, 10), "y": (-10, 10)} # Give a hint if the user leaves off the coefficient # or leaves out x - hints = [('x + 3*y', 'y_coefficient', 'Check the coefficient of y'), - ('2*y', 'missing_x', 'Try including the variable x')] + hints = [ + ("x + 3*y", "y_coefficient", "Check the coefficient of y"), + ("2*y", "missing_x", "Try including the variable x"), + ] # The expected solution is numerically equivalent to x+2y - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance=0.01, - answer="x+2*y", - hints=hints) + problem = self.build_problem( + sample_dict=sample_dict, num_samples=10, tolerance=0.01, answer="x+2*y", hints=hints + ) # Expect to receive a hint if we add an extra y - input_dict = {'1_2_1': "x + 2*y + y"} + input_dict = {"1_2_1": "x + 2*y + y"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'Check the coefficient of y' + assert correct_map.get_hint("1_2_1") == "Check the coefficient of y" # Expect to receive a hint if we leave out x - input_dict = {'1_2_1': "2*y"} + input_dict = {"1_2_1": "2*y"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'Try including the variable x' + assert correct_map.get_hint("1_2_1") == "Try including the variable x" def test_script(self): """ @@ -495,18 +487,16 @@ def test_script(self): script = "calculated_ans = 'x+x'" # Sample x in the range [-10,10] - sample_dict = {'x': (-10, 10)} + sample_dict = {"x": (-10, 10)} # The expected solution is numerically equivalent to 2*x - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance=0.01, - answer="$calculated_ans", - script=script) + problem = self.build_problem( + sample_dict=sample_dict, num_samples=10, tolerance=0.01, answer="$calculated_ans", script=script + ) # Expect that the inputs are graded 
correctly - self.assert_grade(problem, '2*x', 'correct') - self.assert_grade(problem, '3*x', 'incorrect') + self.assert_grade(problem, "2*x", "correct") + self.assert_grade(problem, "3*x", "incorrect") def test_grade_infinity(self): """ @@ -514,13 +504,10 @@ def test_grade_infinity(self): erroneously marked as correct. """ - sample_dict = {'x': (1, 2)} + sample_dict = {"x": (1, 2)} # Test problem - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance="1%", - answer="x") + problem = self.build_problem(sample_dict=sample_dict, num_samples=10, tolerance="1%", answer="x") # Expect such a large answer to be marked incorrect input_formula = "x*1e999" self.assert_grade(problem, input_formula, "incorrect") @@ -533,13 +520,10 @@ def test_grade_nan(self): Test that expressions that evaluate to NaN are not marked as correct. """ - sample_dict = {'x': (1, 2)} + sample_dict = {"x": (1, 2)} # Test problem - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance="1%", - answer="x") + problem = self.build_problem(sample_dict=sample_dict, num_samples=10, tolerance="1%", answer="x") # Expect an incorrect answer (+ nan) to be marked incorrect # Right now this evaluates to 'nan' for a given x (Python implementation-dependent) input_formula = "10*x + 0*1e999" @@ -552,27 +536,21 @@ def test_raises_zero_division_err(self): """ See if division by zero raises an error. """ - sample_dict = {'x': (1, 2)} - problem = self.build_problem(sample_dict=sample_dict, - num_samples=10, - tolerance="1%", - answer="x") # Answer doesn't matter - input_dict = {'1_2_1': '1/0'} + sample_dict = {"x": (1, 2)} + problem = self.build_problem( + sample_dict=sample_dict, num_samples=10, tolerance="1%", answer="x" + ) # Answer doesn't matter + input_dict = {"1_2_1": "1/0"} self.assertRaises(StudentInputError, problem.grade_answers, input_dict) def test_validate_answer(self): """ Makes sure that validate_answer works. """ - sample_dict = {'x': (1, 2)} - problem = self.build_problem( - sample_dict=sample_dict, - num_samples=10, - tolerance="1%", - answer="x" - ) - assert list(problem.responders.values())[0].validate_answer('14*x') - assert not list(problem.responders.values())[0].validate_answer('3*y+2*x') + sample_dict = {"x": (1, 2)} + problem = self.build_problem(sample_dict=sample_dict, num_samples=10, tolerance="1%", answer="x") + assert list(problem.responders.values())[0].validate_answer("14*x") + assert not list(problem.responders.values())[0].validate_answer("3*y+2*x") @use_unsafe_codejail() @@ -639,7 +617,7 @@ def test_regexp(self): "Martin Luther King Junior", "Doctor Martin Luther King Junior", "Dr. 
Martin Luther King Jr.", - "Martin Luther King" + "Martin Luther King", ] problem = self.build_problem(answer=r"\w*\.?.*Luther King\s*.*", case_sensitive=True, regexp=True) @@ -659,10 +637,7 @@ def test_regexp(self): "^4|Four$", ] problem = self.build_problem( - answer="just_sample", - case_sensitive=False, - regexp=True, - additional_answers=regexps + answer="just_sample", case_sensitive=False, regexp=True, additional_answers=regexps ) self.assert_grade(problem, "One", "correct") @@ -676,7 +651,7 @@ def test_regexp(self): self.assert_grade(problem, "|", "incorrect") # test unicode - problem = self.build_problem(answer="æ", case_sensitive=False, regexp=True, additional_answers=['ö']) + problem = self.build_problem(answer="æ", case_sensitive=False, regexp=True, additional_answers=["ö"]) self.assert_grade(problem, "æ", "correct") self.assert_grade(problem, "ö", "correct") self.assert_grade(problem, "î", "incorrect") @@ -732,12 +707,8 @@ def test_case_sensitive(self): answers = ["Second", "Third", "Fourth"] # set up problems - problem_specified = self.build_problem( - answer="sample_answer", case_sensitive=True, additional_answers=answers - ) - problem_not_specified = self.build_problem( - answer="sample_answer", additional_answers=answers - ) + problem_specified = self.build_problem(answer="sample_answer", case_sensitive=True, additional_answers=answers) + problem_not_specified = self.build_problem(answer="sample_answer", additional_answers=answers) problems = [problem_specified, problem_not_specified] for problem in problems: for answer in answers: @@ -791,16 +762,16 @@ def test_compatible_non_attribute_additional_answer_xml(self): self.assert_grade(problem, "Meh", "incorrect") def test_partial_matching(self): - problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['.?\\d.?']) + problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=[".?\\d.?"]) self.assert_grade(problem, "a3", "correct") self.assert_grade(problem, "3a", "correct") def test_exception(self): - problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=['?\\d?']) + problem = self.build_problem(answer="a2", case_sensitive=False, regexp=True, additional_answers=["?\\d?"]) with pytest.raises(Exception) as cm: self.assert_grade(problem, "a3", "correct") exception_message = str(cm.value) - assert 'nothing to repeat' in exception_message + assert "nothing to repeat" in exception_message def test_hints(self): @@ -814,24 +785,24 @@ def test_hints(self): hints=hints, ) # We should get a hint for Wisconsin - input_dict = {'1_2_1': 'Wisconsin'} + input_dict = {"1_2_1": "Wisconsin"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'The state capital of Wisconsin is Madison' + assert correct_map.get_hint("1_2_1") == "The state capital of Wisconsin is Madison" # We should get a hint for Minnesota - input_dict = {'1_2_1': 'Minnesota'} + input_dict = {"1_2_1": "Minnesota"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'The state capital of Minnesota is St. Paul' + assert correct_map.get_hint("1_2_1") == "The state capital of Minnesota is St. 
Paul" # We should NOT get a hint for Michigan (the correct answer) - input_dict = {'1_2_1': 'Michigan'} + input_dict = {"1_2_1": "Michigan"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == '' + assert correct_map.get_hint("1_2_1") == "" # We should NOT get a hint for any other string - input_dict = {'1_2_1': 'California'} + input_dict = {"1_2_1": "California"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == '' + assert correct_map.get_hint("1_2_1") == "" def test_hints_regexp_and_answer_regexp(self): different_student_answers = [ @@ -846,84 +817,86 @@ def test_hints_regexp_and_answer_regexp(self): ("wisconsin", "wisc", "The state capital of Wisconsin is Madison"), ("minnesota", "minn", "The state capital of Minnesota is St. Paul"), (".*Boston.*", "bst", "First letter of correct answer is M."), - ('^\\d9$', "numbers", "Should not end with 9."), + ("^\\d9$", "numbers", "Should not end with 9."), ] additional_answers = [ - '^\\d[0-8]$', + "^\\d[0-8]$", ] problem = self.build_problem( - answer="Michigan", - case_sensitive=False, - hints=hints, - additional_answers=additional_answers, - regexp=True + answer="Michigan", case_sensitive=False, hints=hints, additional_answers=additional_answers, regexp=True ) # We should get a hint for Wisconsin - input_dict = {'1_2_1': 'Wisconsin'} + input_dict = {"1_2_1": "Wisconsin"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'The state capital of Wisconsin is Madison' + assert correct_map.get_hint("1_2_1") == "The state capital of Wisconsin is Madison" # We should get a hint for Minnesota - input_dict = {'1_2_1': 'Minnesota'} + input_dict = {"1_2_1": "Minnesota"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'The state capital of Minnesota is St. Paul' + assert correct_map.get_hint("1_2_1") == "The state capital of Minnesota is St. Paul" # We should NOT get a hint for Michigan (the correct answer) - input_dict = {'1_2_1': 'Michigan'} + input_dict = {"1_2_1": "Michigan"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == '' + assert correct_map.get_hint("1_2_1") == "" # We should NOT get a hint for any other string - input_dict = {'1_2_1': 'California'} + input_dict = {"1_2_1": "California"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == '' + assert correct_map.get_hint("1_2_1") == "" # We should get the same hint for each answer for answer in different_student_answers: - input_dict = {'1_2_1': answer} + input_dict = {"1_2_1": answer} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'First letter of correct answer is M.' + assert correct_map.get_hint("1_2_1") == "First letter of correct answer is M." - input_dict = {'1_2_1': '59'} + input_dict = {"1_2_1": "59"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_hint('1_2_1') == 'Should not end with 9.' + assert correct_map.get_hint("1_2_1") == "Should not end with 9." 
 
-        input_dict = {'1_2_1': '57'}
+        input_dict = {"1_2_1": "57"}
         correct_map = problem.grade_answers(input_dict)
-        assert correct_map.get_hint('1_2_1') == ''
+        assert correct_map.get_hint("1_2_1") == ""
 
     def test_computed_hints(self):
         problem = self.build_problem(
             answer="Michigan",
             hintfn="gimme_a_hint",
-            script=textwrap.dedent("""
+            script=textwrap.dedent(
+                """
                 def gimme_a_hint(answer_ids, student_answers, new_cmap, old_cmap):
                     aid = answer_ids[0]
                     answer = student_answers[aid]
                     new_cmap.set_hint_and_mode(aid, answer+"??", "always")
-            """)
+                """
+            ),
         )
 
-        input_dict = {'1_2_1': 'Hello'}
+        input_dict = {"1_2_1": "Hello"}
         correct_map = problem.grade_answers(input_dict)
-        assert correct_map.get_hint('1_2_1') == 'Hello??'
+        assert correct_map.get_hint("1_2_1") == "Hello??"
 
     def test_hint_function_randomization(self):
         # The hint function should get the seed from the problem.
         problem = self.build_problem(
             answer="1",
             hintfn="gimme_a_random_hint",
-            script=textwrap.dedent("""
+            script=textwrap.dedent(
+                """
                 def gimme_a_random_hint(answer_ids, student_answers, new_cmap, old_cmap):
                     answer = {code}
                     new_cmap.set_hint_and_mode(answer_ids[0], answer, "always")
-            """.format(code=self._get_random_number_code()))
+                """.format(
+                    code=self._get_random_number_code()
+                )
+            ),
         )
-        correct_map = problem.grade_answers({'1_2_1': '2'})
-        hint = correct_map.get_hint('1_2_1')
+        correct_map = problem.grade_answers({"1_2_1": "2"})
+        hint = correct_map.get_hint("1_2_1")
         assert hint == self._get_random_number_result(problem.seed)
 
     def test_empty_answer_graded_as_incorrect(self):
@@ -941,16 +914,15 @@ def setUp(self):
         super(CodeResponseTest, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
 
         grader_payload = json.dumps({"grader": "ps04/grade_square.py"})
-        self.problem = self.build_problem(initial_display="def square(x):",
-                                          answer_display="answer",
-                                          grader_payload=grader_payload,
-                                          num_responses=2)
+        self.problem = self.build_problem(
+            initial_display="def square(x):", answer_display="answer", grader_payload=grader_payload, num_responses=2
+        )
 
     @staticmethod
     def make_queuestate(key, time):
         """Create queuestate dict"""
         timestr = datetime.strftime(time, dateformat)
-        return {'key': key, 'time': timestr}
+        return {"key": key, "time": timestr}
 
     def test_is_queued(self):
         """
@@ -977,9 +949,9 @@ def test_is_queued(self):
 
         assert self.problem.is_queued() is True
 
     def test_update_score(self):
-        '''
+        """
         Test whether LoncapaProblem.update_score can deliver queued result to the right subproblem
-        '''
+        """
         answer_ids = sorted(self.problem.get_question_answers())
 
         # CodeResponse requires internal CorrectMap state. Build it now in the queued state
@@ -990,15 +962,17 @@ def test_update_score(self):
             old_cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
 
         # Message format common to external graders
-        grader_msg = 'MESSAGE'  # Must be valid XML
-        correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
-        incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
+        grader_msg = "MESSAGE"  # Must be valid XML
+        correct_score_msg = json.dumps({"correct": True, "score": 1, "msg": grader_msg})
+        incorrect_score_msg = json.dumps({"correct": False, "score": 0, "msg": grader_msg})
 
-        xserver_msgs = {'correct': correct_score_msg,
-                        'incorrect': incorrect_score_msg, }
+        xserver_msgs = {
+            "correct": correct_score_msg,
+            "incorrect": incorrect_score_msg,
+        }
 
         # Incorrect queuekey, state should not be updated
-        for correctness in ['correct', 'incorrect']:
+        for correctness in ["correct", "incorrect"]:
             self.problem.correct_map = CorrectMap()
             self.problem.correct_map.update(old_cmap)  # Deep copy
@@ -1011,15 +985,17 @@ def test_update_score(self):
             # Should be still queued, since message undelivered  # lint-amnesty, pylint: disable=line-too-long
 
         # Correct queuekey, state should be updated
-        for correctness in ['correct', 'incorrect']:
+        for correctness in ["correct", "incorrect"]:
             for i, answer_id in enumerate(answer_ids):
                 self.problem.correct_map = CorrectMap()
                 self.problem.correct_map.update(old_cmap)
 
                 new_cmap = CorrectMap()
                 new_cmap.update(old_cmap)
-                npoints = 1 if correctness == 'correct' else 0
-                new_cmap.set(answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None)  # lint-amnesty, pylint: disable=line-too-long
+                npoints = 1 if correctness == "correct" else 0
+                new_cmap.set(
+                    answer_id=answer_id, npoints=npoints, correctness=correctness, msg=grader_msg, queuestate=None
+                )  # lint-amnesty, pylint: disable=line-too-long
 
                 self.problem.update_score(xserver_msgs[correctness], queuekey=1000 + i)
                 assert self.problem.correct_map.get_dict() == new_cmap.get_dict()
@@ -1033,9 +1009,9 @@ def test_update_score(self):
             # Should be queued, message undelivered  # lint-amnesty, pylint: disable=line-too-long
 
     def test_recentmost_queuetime(self):
-        '''
+        """
         Test whether the LoncapaProblem knows about the time of queue requests
-        '''
+        """
         answer_ids = sorted(self.problem.get_question_answers())
 
         # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state
@@ -1056,44 +1032,50 @@ def test_recentmost_queuetime(self):
         self.problem.correct_map.update(cmap)
 
         # Queue state only tracks up to second
-        latest_timestamp = datetime.strptime(
-            datetime.strftime(latest_timestamp, dateformat), dateformat
-        ).replace(tzinfo=UTC)
+        latest_timestamp = datetime.strptime(datetime.strftime(latest_timestamp, dateformat), dateformat).replace(
+            tzinfo=UTC
+        )
 
         assert self.problem.get_recentmost_queuetime() == latest_timestamp
 
     def test_convert_files_to_filenames(self):
-        '''
+        """
         Test whether file objects are converted to filenames without altering other structures
-        '''
+        """
         problem_file = os.path.join(os.path.dirname(__file__), "test_files/filename_convert_test.txt")
         with open(problem_file) as fp:
-            answers_with_file = {'1_2_1': 'String-based answer',
-                                 '1_3_1': ['answer1', 'answer2', 'answer3'],
-                                 '1_4_1': [fp, fp]}
+            answers_with_file = {
+                "1_2_1": "String-based answer",
+                "1_3_1": ["answer1", "answer2", "answer3"],
+                "1_4_1": [fp, fp],
+            }
             answers_converted = convert_files_to_filenames(answers_with_file)
-            assert answers_converted['1_2_1'] == 'String-based answer'
-            assert answers_converted['1_3_1'] == ['answer1', 'answer2', 'answer3']
-            assert answers_converted['1_4_1'] == [fp.name, fp.name]
+            assert answers_converted["1_2_1"] == "String-based answer"
+            assert answers_converted["1_3_1"] == ["answer1", "answer2", "answer3"]
+            assert answers_converted["1_4_1"] == [fp.name, fp.name]
 
     def test_parse_score_msg_of_responder(self):
         """
         Test whether LoncapaProblem._parse_score_msg correcly parses valid HTML5 html.
         """
         valid_grader_msgs = [
-            'MESSAGE',  # Valid XML
-            textwrap.dedent("""
+            "MESSAGE",  # Valid XML
+            textwrap.dedent(
+                """
                Right click here and click \"Save As\" to download the file
-            """).replace('\n', ''),  # Valid HTML5 real case Matlab response, invalid XML
-            ''  # Invalid XML, but will be parsed by html5lib to
+                """
+            ).replace(
+                "\n", ""
+            ),  # Valid HTML5 real case Matlab response, invalid XML
+            "",  # Invalid XML, but will be parsed by html5lib to
         ]
 
         invalid_grader_msgs = [
-            '\b',  # invalid special character
+            "\b",  # invalid special character
         ]
 
        answer_ids = sorted(self.problem.get_question_answers())
@@ -1106,27 +1088,33 @@ def test_parse_score_msg_of_responder(self):
             old_cmap.update(CorrectMap(answer_id=answer_id, queuestate=queuestate))
 
         for grader_msg in valid_grader_msgs:
-            correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
-            incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
-            xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }
+            correct_score_msg = json.dumps({"correct": True, "score": 1, "msg": grader_msg})
+            incorrect_score_msg = json.dumps({"correct": False, "score": 0, "msg": grader_msg})
+            xserver_msgs = {
+                "correct": correct_score_msg,
+                "incorrect": incorrect_score_msg,
+            }
 
             for i, answer_id in enumerate(answer_ids):
                 self.problem.correct_map = CorrectMap()
                 self.problem.correct_map.update(old_cmap)
-                output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
-                assert output[answer_id]['msg'] == grader_msg
+                output = self.problem.update_score(xserver_msgs["correct"], queuekey=1000 + i)
+                assert output[answer_id]["msg"] == grader_msg
 
         for grader_msg in invalid_grader_msgs:
-            correct_score_msg = json.dumps({'correct': True, 'score': 1, 'msg': grader_msg})
-            incorrect_score_msg = json.dumps({'correct': False, 'score': 0, 'msg': grader_msg})
-            xserver_msgs = {'correct': correct_score_msg, 'incorrect': incorrect_score_msg, }
+            correct_score_msg = json.dumps({"correct": True, "score": 1, "msg": grader_msg})
+            incorrect_score_msg = json.dumps({"correct": False, "score": 0, "msg": grader_msg})
+            xserver_msgs = {
+                "correct": correct_score_msg,
+                "incorrect": incorrect_score_msg,
+            }
 
             for i, answer_id in enumerate(answer_ids):
                 self.problem.correct_map = CorrectMap()
                 self.problem.correct_map.update(old_cmap)
 
-                output = self.problem.update_score(xserver_msgs['correct'], queuekey=1000 + i)
-                assert output[answer_id]['msg'] == 'Invalid grader reply. Please contact the course staff.'
+                output = self.problem.update_score(xserver_msgs["correct"], queuekey=1000 + i)
+                assert output[answer_id]["msg"] == "Invalid grader reply. Please contact the course staff."
 
 
 @use_unsafe_codejail()
@@ -1134,151 +1122,122 @@ class ChoiceResponseTest(ResponseTest):  # pylint: disable=missing-class-docstri
     xml_factory_class = ChoiceResponseXMLFactory
 
     def test_radio_group_grade(self):
-        problem = self.build_problem(choice_type='radio',
-                                     choices=[False, True, False])
+        problem = self.build_problem(choice_type="radio", choices=[False, True, False])
 
         # Check that we get the expected results
-        self.assert_grade(problem, 'choice_0', 'incorrect')
-        self.assert_grade(problem, 'choice_1', 'correct')
-        self.assert_grade(problem, 'choice_2', 'incorrect')
+        self.assert_grade(problem, "choice_0", "incorrect")
+        self.assert_grade(problem, "choice_1", "correct")
+        self.assert_grade(problem, "choice_2", "incorrect")
 
         # No choice 3 exists --> mark incorrect
-        self.assert_grade(problem, 'choice_3', 'incorrect')
+        self.assert_grade(problem, "choice_3", "incorrect")
 
     def test_checkbox_group_grade(self):
-        problem = self.build_problem(choice_type='checkbox',
-                                     choices=[False, True, True])
+        problem = self.build_problem(choice_type="checkbox", choices=[False, True, True])
 
         # Check that we get the expected results
         # (correct if and only if BOTH correct choices chosen)
-        self.assert_grade(problem, ['choice_1', 'choice_2'], 'correct')
-        self.assert_grade(problem, 'choice_1', 'incorrect')
-        self.assert_grade(problem, 'choice_2', 'incorrect')
-        self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
-        self.assert_grade(problem, ['choice_0', 'choice_2'], 'incorrect')
+        self.assert_grade(problem, ["choice_1", "choice_2"], "correct")
+        self.assert_grade(problem, "choice_1", "incorrect")
+        self.assert_grade(problem, "choice_2", "incorrect")
+        self.assert_grade(problem, ["choice_0", "choice_1"], "incorrect")
+        self.assert_grade(problem, ["choice_0", "choice_2"], "incorrect")
 
         # No choice 3 exists --> mark incorrect
-        self.assert_grade(problem, 'choice_3', 'incorrect')
+        self.assert_grade(problem, "choice_3", "incorrect")
 
     def test_checkbox_group_valid_grading_schemes(self):
         # Checkbox-type problems only allow one partial credit scheme.
         # Change this test if that changes.
         problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='edc,halves,bongo'
+            choice_type="checkbox", choices=[False, False, True, True], credit_type="edc,halves,bongo"
         )
         with pytest.raises(LoncapaProblemError):
-            input_dict = {'1_2_1': 'choice_1'}
+            input_dict = {"1_2_1": "choice_1"}
             problem.grade_answers(input_dict)
 
         # 'bongo' is not a valid grading scheme.
-        problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='bongo'
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, True, True], credit_type="bongo")
         with pytest.raises(LoncapaProblemError):
-            input_dict = {'1_2_1': 'choice_1'}
+            input_dict = {"1_2_1": "choice_1"}
             problem.grade_answers(input_dict)
 
     def test_checkbox_group_partial_credit_grade(self):
         # First: Every Decision Counts grading style
-        problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='edc'
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, True, True], credit_type="edc")
 
         # Check that we get the expected results
         # (correct if and only if BOTH correct choices chosen)
         # (partially correct if at least one choice is right)
         # (incorrect if totally wrong)
-        self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
-        self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
-        self.assert_grade(problem, 'choice_0', 'partially-correct')
-        self.assert_grade(problem, 'choice_2', 'partially-correct')
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
+        self.assert_grade(problem, ["choice_0", "choice_1"], "incorrect")
+        self.assert_grade(problem, ["choice_2", "choice_3"], "correct")
+        self.assert_grade(problem, "choice_0", "partially-correct")
+        self.assert_grade(problem, "choice_2", "partially-correct")
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_2", "choice_3"], "partially-correct")
 
         # Second: Halves grading style
-        problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='halves'
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, True, True], credit_type="halves")
 
         # Check that we get the expected results
         # (correct if and only if BOTH correct choices chosen)
         # (partially correct on one error)
         # (incorrect for more errors, at least with this # of choices.)
-        self.assert_grade(problem, ['choice_0', 'choice_1'], 'incorrect')
-        self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
-        self.assert_grade(problem, 'choice_2', 'partially-correct')
-        self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'incorrect')
+        self.assert_grade(problem, ["choice_0", "choice_1"], "incorrect")
+        self.assert_grade(problem, ["choice_2", "choice_3"], "correct")
+        self.assert_grade(problem, "choice_2", "partially-correct")
+        self.assert_grade(problem, ["choice_1", "choice_2", "choice_3"], "partially-correct")
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_2", "choice_3"], "incorrect")
 
         # Third: Halves grading style with more options
         problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True, False],
-            credit_type='halves'
+            choice_type="checkbox", choices=[False, False, True, True, False], credit_type="halves"
        )
 
         # Check that we get the expected results
         # (2 errors allowed with 5+ choices)
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_4'], 'incorrect')
-        self.assert_grade(problem, ['choice_2', 'choice_3'], 'correct')
-        self.assert_grade(problem, 'choice_2', 'partially-correct')
-        self.assert_grade(problem, ['choice_1', 'choice_2', 'choice_3'], 'partially-correct')
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3'], 'partially-correct')
-        self.assert_grade(problem, ['choice_0', 'choice_1', 'choice_2', 'choice_3', 'choice_4'], 'incorrect')
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_4"], "incorrect")
+        self.assert_grade(problem, ["choice_2", "choice_3"], "correct")
+        self.assert_grade(problem, "choice_2", "partially-correct")
+        self.assert_grade(problem, ["choice_1", "choice_2", "choice_3"], "partially-correct")
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_2", "choice_3"], "partially-correct")
+        self.assert_grade(problem, ["choice_0", "choice_1", "choice_2", "choice_3", "choice_4"], "incorrect")
 
     def test_checkbox_group_partial_points_grade(self):
         # Ensure that we get the expected number of points
         # Using assertAlmostEqual to avoid floating point issues
         # First: Every Decision Counts grading style
-        problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='edc'
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, True, True], credit_type="edc")
 
-        correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
-        assert round(correct_map.get_npoints('1_2_1') - 0.75, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_2"})
+        assert round(correct_map.get_npoints("1_2_1") - 0.75, 7) >= 0
 
         # Second: Halves grading style
-        problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True],
-            credit_type='halves'
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, True, True], credit_type="halves")
 
-        correct_map = problem.grade_answers({'1_2_1': 'choice_2'})
-        assert round(correct_map.get_npoints('1_2_1') - 0.5, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_2"})
+        assert round(correct_map.get_npoints("1_2_1") - 0.5, 7) >= 0
 
         # Third: Halves grading style with more options
         problem = self.build_problem(
-            choice_type='checkbox',
-            choices=[False, False, True, True, False],
-            credit_type='halves'
+            choice_type="checkbox", choices=[False, False, True, True, False], credit_type="halves"
         )
 
-        correct_map = problem.grade_answers({'1_2_1': 'choice_2,choice4'})
-        assert round(correct_map.get_npoints('1_2_1') - 0.25, 7) >= 0
+        correct_map = problem.grade_answers({"1_2_1": "choice_2,choice4"})
+        assert round(correct_map.get_npoints("1_2_1") - 0.25, 7) >= 0
 
     def test_grade_with_no_checkbox_selected(self):
         """
         Test that answer marked as incorrect if no checkbox selected.
         """
-        problem = self.build_problem(
-            choice_type='checkbox', choices=[False, False, False]
-        )
+        problem = self.build_problem(choice_type="checkbox", choices=[False, False, False])
 
         correct_map = problem.grade_answers({})
-        assert correct_map.get_correctness('1_2_1') == 'incorrect'
+        assert correct_map.get_correctness("1_2_1") == "incorrect"
 
     def test_contextualized_choices(self):
-        script = textwrap.dedent("""
+        script = textwrap.dedent(
+            """
             a = 6
             b = 4
             c = a + b
@@ -1287,15 +1246,14 @@ def test_contextualized_choices(self):
             ok1 = c % 3 == 0  # check remainder modulo 3
             ok2 = c % 5 == 0  # check remainder modulo 5
             ok3 = not any((ok0, ok1, ok2))
-            """)
+            """
+        )
         choices = ["$ok0", "$ok1", "$ok2", "$ok3"]
-        problem = self.build_problem(script=script,
-                                     choice_type='checkbox',
-                                     choices=choices)
+        problem = self.build_problem(script=script, choice_type="checkbox", choices=choices)
 
         # Ensure the expected correctness
-        self.assert_grade(problem, ['choice_0', 'choice_2'], 'correct')
-        self.assert_grade(problem, ['choice_1', 'choice_3'], 'incorrect')
+        self.assert_grade(problem, ["choice_0", "choice_2"], "correct")
+        self.assert_grade(problem, ["choice_1", "choice_3"], "incorrect")
 
 
 @use_unsafe_codejail()
@@ -1308,10 +1266,10 @@ class NumericalResponseTest(ResponseTest):  # pylint: disable=missing-class-docs
     def test_grade_range_tolerance(self):
         problem_setup = [
             # [given_answer, [list of correct responses], [list of incorrect responses]]
-            ['[5, 7)', ['5', '6', '6.999'], ['4.999', '7']],
-            ['[1.6e-5, 1.9e24)', ['0.000016', '1.6*10^-5', '1.59e24'], ['1.59e-5', '1.9e24', '1.9*10^24']],
-            ['[0, 1.6e-5]', ['1.6*10^-5'], ["2"]],
-            ['(1.6e-5, 10]', ["2"], ['1.6*10^-5']],
+            ["[5, 7)", ["5", "6", "6.999"], ["4.999", "7"]],
+            ["[1.6e-5, 1.9e24)", ["0.000016", "1.6*10^-5", "1.59e24"], ["1.59e-5", "1.9e24", "1.9*10^24"]],
+            ["[0, 1.6e-5]", ["1.6*10^-5"], ["2"]],
+            ["(1.6e-5, 10]", ["2"], ["1.6*10^-5"]],
         ]
         for given_answer, correct_responses, incorrect_responses in problem_setup:
             problem = self.build_problem(answer=given_answer)
@@ -1321,49 +1279,47 @@ def test_additional_answer_grading(self):
         """
         Test additional answers are graded correct with their associated correcthint.
         """
-        primary_answer = '100'
-        primary_correcthint = 'primary feedback'
+        primary_answer = "100"
+        primary_correcthint = "primary feedback"
         additional_answers = {
-            '1': '1. additional feedback',
-            '2': '2. additional feedback',
-            '4': '4. additional feedback',
-            '5': ''
+            "1": "1. additional feedback",
+            "2": "2. additional feedback",
+            "4": "4. additional feedback",
+            "5": "",
        }
         problem = self.build_problem(
-            answer=primary_answer,
-            additional_answers=additional_answers,
-            correcthint=primary_correcthint
+            answer=primary_answer, additional_answers=additional_answers, correcthint=primary_correcthint
        )
 
         # Assert primary answer is graded correctly.
-        correct_map = problem.grade_answers({'1_2_1': primary_answer})
-        assert correct_map.get_correctness('1_2_1') == 'correct'
-        assert primary_correcthint in correct_map.get_msg('1_2_1')
+        correct_map = problem.grade_answers({"1_2_1": primary_answer})
+        assert correct_map.get_correctness("1_2_1") == "correct"
+        assert primary_correcthint in correct_map.get_msg("1_2_1")
 
         # Assert additional answers are graded correct
         for answer, correcthint in additional_answers.items():
-            correct_map = problem.grade_answers({'1_2_1': answer})
-            assert correct_map.get_correctness('1_2_1') == 'correct'
-            assert correcthint in correct_map.get_msg('1_2_1')
+            correct_map = problem.grade_answers({"1_2_1": answer})
+            assert correct_map.get_correctness("1_2_1") == "correct"
+            assert correcthint in correct_map.get_msg("1_2_1")
 
     def test_additional_answer_get_score(self):
         """
         Test `get_score` is working for additional answers.
         """
-        problem = self.build_problem(answer='100', additional_answers={'1': ''})
+        problem = self.build_problem(answer="100", additional_answers={"1": ""})
         responder = list(problem.responders.values())[0]
 
         # Check primary answer.
-        new_cmap = responder.get_score({'1_2_1': '100'})
-        assert new_cmap.get_correctness('1_2_1') == 'correct'
+        new_cmap = responder.get_score({"1_2_1": "100"})
+        assert new_cmap.get_correctness("1_2_1") == "correct"
 
         # Check additional answer.
-        new_cmap = responder.get_score({'1_2_1': '1'})
-        assert new_cmap.get_correctness('1_2_1') == 'correct'
+        new_cmap = responder.get_score({"1_2_1": "1"})
+        assert new_cmap.get_correctness("1_2_1") == "correct"
 
         # Check any wrong answer.
-        new_cmap = responder.get_score({'1_2_1': '2'})
-        assert new_cmap.get_correctness('1_2_1') == 'incorrect'
+        new_cmap = responder.get_score({"1_2_1": "2"})
+        assert new_cmap.get_correctness("1_2_1") == "incorrect"
 
     def test_grade_range_tolerance_partial_credit(self):
         problem_setup = [
@@ -1371,63 +1327,43 @@ def test_grade_range_tolerance_partial_credit(self):
             # [list of correct responses],
             # [list of incorrect responses],
             # [list of partially correct responses]]
-            [
-                '[5, 7)',
-                ['5', '6', '6.999'],
-                ['0', '100'],
-                ['4', '8']
-            ],
-            [
-                '[1.6e-5, 1.9e24)',
-                ['0.000016', '1.6*10^-5', '1.59e24'],
-                ['-1e26', '1.9e26', '1.9*10^26'],
-                ['0', '2e24']
-            ],
-            [
-                '[0, 1.6e-5]',
-                ['1.6*10^-5'],
-                ['2'],
-                ['1.9e-5', '-1e-6']
-            ],
-            [
-                '(1.6e-5, 10]',
-                ['2'],
-                ['-20', '30'],
-                ['-1', '12']
-            ],
+            ["[5, 7)", ["5", "6", "6.999"], ["0", "100"], ["4", "8"]],
+            ["[1.6e-5, 1.9e24)", ["0.000016", "1.6*10^-5", "1.59e24"], ["-1e26", "1.9e26", "1.9*10^26"], ["0", "2e24"]],
+            ["[0, 1.6e-5]", ["1.6*10^-5"], ["2"], ["1.9e-5", "-1e-6"]],
+            ["(1.6e-5, 10]", ["2"], ["-20", "30"], ["-1", "12"]],
         ]
         for given_answer, correct_responses, incorrect_responses, partial_responses in problem_setup:
-            problem = self.build_problem(answer=given_answer, credit_type='close')
+            problem = self.build_problem(answer=given_answer, credit_type="close")
             self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
 
     def test_grade_range_tolerance_exceptions(self):
         # no complex number in range tolerance staff answer
-        problem = self.build_problem(answer='[1j, 5]')
-        input_dict = {'1_2_1': '3'}
+        problem = self.build_problem(answer="[1j, 5]")
+        input_dict = {"1_2_1": "3"}
         with pytest.raises(StudentInputError):
             problem.grade_answers(input_dict)
 
         # no complex numbers in student ansers to range tolerance problems
-        problem = self.build_problem(answer='(1, 5)')
-        input_dict = {'1_2_1': '1*J'}
+        problem = self.build_problem(answer="(1, 5)")
+        input_dict = {"1_2_1": "1*J"}
         with pytest.raises(StudentInputError):
             problem.grade_answers(input_dict)
 
         # test isnan student input: no exception,
         # but problem should be graded as incorrect
-        problem = self.build_problem(answer='(1, 5)')
-        input_dict = {'1_2_1': ''}
+        problem = self.build_problem(answer="(1, 5)")
+        input_dict = {"1_2_1": ""}
         correct_map = problem.grade_answers(input_dict)
-        correctness = correct_map.get_correctness('1_2_1')
-        assert correctness == 'incorrect'
+        correctness = correct_map.get_correctness("1_2_1")
+        assert correctness == "incorrect"
 
         # test invalid range tolerance answer
         with pytest.raises(StudentInputError):
-            problem = self.build_problem(answer='(1 5)')
+            problem = self.build_problem(answer="(1 5)")
 
         # test empty boundaries
-        problem = self.build_problem(answer='(1, ]')
-        input_dict = {'1_2_1': '3'}
+        problem = self.build_problem(answer="(1, ]")
+        input_dict = {"1_2_1": "3"}
         with pytest.raises(StudentInputError):
             problem.grade_answers(input_dict)
@@ -1439,34 +1375,21 @@ def test_grade_exact(self):
     def test_grade_partial(self):
         # First: "list"-style grading scheme.
-        problem = self.build_problem(
-            answer=4,
-            credit_type='list',
-            partial_answers='2,8,-4'
-        )
+        problem = self.build_problem(answer=4, credit_type="list", partial_answers="2,8,-4")
         correct_responses = ["4", "4.0"]
         incorrect_responses = ["1", "3", "4.1", "0", "-2"]
         partial_responses = ["2", "2.0", "-4", "-4.0", "8", "8.0"]
         self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
 
         # Second: "close"-style grading scheme. Default range is twice tolerance.
-        problem = self.build_problem(
-            answer=4,
-            tolerance=0.2,
-            credit_type='close'
-        )
+        problem = self.build_problem(answer=4, tolerance=0.2, credit_type="close")
         correct_responses = ["4", "4.1", "3.9"]
         incorrect_responses = ["1", "3", "4.5", "0", "-2"]
         partial_responses = ["4.3", "3.7"]
         self.assert_multiple_partial(problem, correct_responses, incorrect_responses, partial_responses)
 
         # Third: "close"-style grading scheme with partial_range set.
-        problem = self.build_problem(
-            answer=4,
-            tolerance=0.2,
-            partial_range=3,
-            credit_type='close'
-        )
+        problem = self.build_problem(answer=4, tolerance=0.2, partial_range=3, credit_type="close")
         correct_responses = ["4", "4.1"]
         incorrect_responses = ["1", "3", "0", "-2"]
         partial_responses = ["4.5", "3.5"]
@@ -1474,11 +1397,7 @@ def test_grade_partial(self):
         # Fourth: both "list"- and "close"-style grading schemes at once.
         problem = self.build_problem(
-            answer=4,
-            tolerance=0.2,
-            partial_range=3,
-            credit_type='close,list',
-            partial_answers='2,8,-4'
+            answer=4, tolerance=0.2, partial_range=3, credit_type="close,list", partial_answers="2,8,-4"
         )
         correct_responses = ["4", "4.0"]
         incorrect_responses = ["1", "3", "0", "-2"]
@@ -1487,8 +1406,8 @@ def test_grade_partial(self):
     def test_numerical_valid_grading_schemes(self):
         # 'bongo' is not a valid grading scheme.
-        problem = self.build_problem(answer=4, tolerance=0.1, credit_type='bongo')
-        input_dict = {'1_2_1': '4'}
+        problem = self.build_problem(answer=4, tolerance=0.1, credit_type="bongo")
+        input_dict = {"1_2_1": "4"}
         with pytest.raises(LoncapaProblemError):
             problem.grade_answers(input_dict)
@@ -1537,7 +1456,7 @@ def test_floats(self):
             [0.000016, ["1.6*10^-5"], ["0.000165"]],
             [1.9e24, ["1.9*10^24"], ["1.9001*10^24"]],
             [2e-15, ["2*10^-15"], [""]],
-            [3141592653589793238., ["3141592653589793115."], [""]],
+            [3141592653589793238.0, ["3141592653589793115."], [""]],
             [0.1234567, ["0.123456", "0.1234561"], ["0.123451"]],
             [1e-5, ["1e-5", "1.0e-5"], ["-1e-5", "2*1e-5"]],
         ]
@@ -1569,7 +1488,7 @@ def test_grade_with_script(self):
     def test_raises_zero_division_err(self):
         """See if division by zero is handled correctly."""
         problem = self.build_problem(answer="1")  # Answer doesn't matter
-        input_dict = {'1_2_1': '1/0'}
+        input_dict = {"1_2_1": "1/0"}
         with pytest.raises(StudentInputError):
             problem.grade_answers(input_dict)
@@ -1583,9 +1502,9 @@ def test_staff_inputs_expressions(self):
     def test_staff_inputs_expressions_legacy(self):
         """Test that staff may enter in a complex number as the answer."""
         problem = self.build_problem(answer="1+1j", tolerance=1e-3)
-        self.assert_grade(problem, '1+j', 'correct')
+        self.assert_grade(problem, "1+j", "correct")
 
-    @mock.patch('xmodule.capa.responsetypes.log')
+    @mock.patch("xmodule.capa.responsetypes.log")
     def test_staff_inputs_bad_syntax(self, mock_log):
         """Test that staff may enter in a complex number as the answer."""
         staff_ans = "clearly bad syntax )[+1e"
@@ -1593,13 +1512,11 @@ def test_staff_inputs_bad_syntax(self, mock_log):
         msg = "There was a problem with the staff answer to this problem"
         with self.assertRaisesRegex(StudentInputError, msg):
-            self.assert_grade(problem, '1+j', 'correct')
+            self.assert_grade(problem, "1+j", "correct")
 
-        mock_log.debug.assert_called_once_with(
-            "Content error--answer '%s' is not a valid number", staff_ans
-        )
+        mock_log.debug.assert_called_once_with("Content error--answer '%s' is not a valid number", staff_ans)
 
-    @mock.patch('xmodule.capa.responsetypes.log')
+    @mock.patch("xmodule.capa.responsetypes.log")
     def test_responsetype_i18n(self, mock_log):  # lint-amnesty, pylint: disable=unused-argument
         """Test that LoncapaSystem has an i18n that works."""
         staff_ans = "clearly bad syntax )[+1e"
@@ -1607,17 +1524,19 @@ def test_responsetype_i18n(self, mock_log):  # lint-amnesty, pylint: disable=unu
         class FakeTranslations(object):
             """A fake gettext.Translations object."""
+
             def ugettext(self, text):
                 """Return the 'translation' of `text`."""
                 if text == "There was a problem with the staff answer to this problem.":
                     text = "TRANSLATED!"
                 return text
+
             gettext = ugettext
 
         problem.capa_system.i18n = FakeTranslations()
 
         with self.assertRaisesRegex(StudentInputError, "TRANSLATED!"):
-            self.assert_grade(problem, '1+j', 'correct')
+            self.assert_grade(problem, "1+j", "correct")
 
     def test_grade_infinity(self):
         """
         pass with any arbitrarily large student answer.
""" mapping = { - 'some big input': float('inf'), - 'some neg input': -float('inf'), - 'weird NaN input': float('nan'), - '4': 4 + "some big input": float("inf"), + "some neg input": -float("inf"), + "weird NaN input": float("nan"), + "4": 4, } def evaluator_side_effect(_, __, math_string): """Look up the given response for `math_string`.""" return mapping[math_string] - problem = self.build_problem(answer=4, tolerance='10%') + problem = self.build_problem(answer=4, tolerance="10%") - with mock.patch('xmodule.capa.responsetypes.evaluator') as mock_eval: + with mock.patch("xmodule.capa.responsetypes.evaluator") as mock_eval: mock_eval.side_effect = evaluator_side_effect - self.assert_grade(problem, 'some big input', 'incorrect') - self.assert_grade(problem, 'some neg input', 'incorrect') - self.assert_grade(problem, 'weird NaN input', 'incorrect') + self.assert_grade(problem, "some big input", "incorrect") + self.assert_grade(problem, "some neg input", "incorrect") + self.assert_grade(problem, "weird NaN input", "incorrect") def test_err_handling(self): """ @@ -1652,39 +1571,42 @@ def test_err_handling(self): problem = self.build_problem(answer=4) errors = [ # (exception raised, message to student) - (calc.UndefinedVariable("Invalid Input: x not permitted in answer as a variable"), - r"Invalid Input: x not permitted in answer as a variable"), + ( + calc.UndefinedVariable("Invalid Input: x not permitted in answer as a variable"), + r"Invalid Input: x not permitted in answer as a variable", + ), (ValueError("factorial() mess-up"), "Factorial function evaluated outside its domain"), (ValueError(), "Could not interpret '.*' as a number"), (pyparsing.ParseException("oopsie"), "Invalid math syntax"), - (ZeroDivisionError(), "Could not interpret '.*' as a number") + (ZeroDivisionError(), "Could not interpret '.*' as a number"), ] - with mock.patch('xmodule.capa.responsetypes.evaluator') as mock_eval: + with mock.patch("xmodule.capa.responsetypes.evaluator") as mock_eval: for err, msg_regex in errors: def evaluator_side_effect(_, __, math_string): """Raise an error only for the student input.""" - if math_string != '4': + if math_string != "4": raise err # lint-amnesty, pylint: disable=cell-var-from-loop + mock_eval.side_effect = evaluator_side_effect with self.assertRaisesRegex(StudentInputError, msg_regex): - problem.grade_answers({'1_2_1': 'foobar'}) + problem.grade_answers({"1_2_1": "foobar"}) def test_compare_answer(self): """Tests the answer compare function.""" problem = self.build_problem(answer="42") responder = list(problem.responders.values())[0] - assert responder.compare_answer('48', '8*6') - assert not responder.compare_answer('48', '9*5') + assert responder.compare_answer("48", "8*6") + assert not responder.compare_answer("48", "9*5") def test_validate_answer(self): """Tests the answer validation function.""" problem = self.build_problem(answer="42") responder = list(problem.responders.values())[0] - assert responder.validate_answer('23.5') - assert not responder.validate_answer('fish') + assert responder.validate_answer("23.5") + assert not responder.validate_answer("fish") @use_unsafe_codejail() @@ -1700,40 +1622,42 @@ def test_inline_code(self): problem = self.build_problem(answer=inline_script, expect="42") # Check results - self.assert_grade(problem, '42', 'correct') - self.assert_grade(problem, '0', 'incorrect') + self.assert_grade(problem, "42", "correct") + self.assert_grade(problem, "0", "incorrect") def test_inline_message(self): # Inline code can update the global messages 
list # to pass messages to the CorrectMap for a particular input # The code can also set the global overall_message (str) # to pass a message that applies to the whole response - inline_script = textwrap.dedent(""" + inline_script = textwrap.dedent( + """ messages[0] = "Test Message" overall_message = "Overall message" - """) + """ + ) problem = self.build_problem(answer=inline_script) - input_dict = {'1_2_1': '0'} + input_dict = {"1_2_1": "0"} correctmap = problem.grade_answers(input_dict) # Check that the message for the particular input was received - input_msg = correctmap.get_msg('1_2_1') - assert input_msg == 'Test Message' + input_msg = correctmap.get_msg("1_2_1") + assert input_msg == "Test Message" # Check that the overall message (for the whole response) was received overall_msg = correctmap.get_overall_message() - assert overall_msg == 'Overall message' + assert overall_msg == "Overall message" def test_inline_randomization(self): # Make sure the seed from the problem gets fed into the script execution. inline_script = "messages[0] = {code}".format(code=self._get_random_number_code()) problem = self.build_problem(answer=inline_script) - input_dict = {'1_2_1': '0'} + input_dict = {"1_2_1": "0"} correctmap = problem.grade_answers(input_dict) - input_msg = correctmap.get_msg('1_2_1') + input_msg = correctmap.get_msg("1_2_1") assert input_msg == self._get_random_number_result(problem.seed) def test_function_code_single_input(self): @@ -1747,7 +1671,8 @@ def test_function_code_single_input(self): # The function should return a dict of the form # { 'ok': BOOL or STRING, 'msg': STRING } (no 'grade_decimal' key to test that it's optional) # - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): partial_credit = '21' if answer_given == expect: @@ -1757,44 +1682,45 @@ def check_func(expect, answer_given): else: retval = False return {'ok': retval, 'msg': 'Message text'} - """) + """ + ) problem = self.build_problem(script=script, cfn="check_func", expect="42") # Correct answer - input_dict = {'1_2_1': '42'} + input_dict = {"1_2_1": "42"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') - npoints = correct_map.get_npoints('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") + npoints = correct_map.get_npoints("1_2_1") - assert correctness == 'correct' - assert msg == 'Message text' + assert correctness == "correct" + assert msg == "Message text" assert npoints == 1 # Partially Credit answer - input_dict = {'1_2_1': '21'} + input_dict = {"1_2_1": "21"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') - npoints = correct_map.get_npoints('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") + npoints = correct_map.get_npoints("1_2_1") - assert correctness == 'partially-correct' - assert msg == 'Message text' + assert correctness == "partially-correct" + assert msg == "Message text" assert 0 <= npoints <= 1 # Incorrect answer - input_dict = {'1_2_1': '0'} + input_dict = {"1_2_1": "0"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') - npoints = correct_map.get_npoints('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") + npoints = 
correct_map.get_npoints("1_2_1") - assert correctness == 'incorrect' - assert msg == 'Message text' + assert correctness == "incorrect" + assert msg == "Message text" assert npoints == 0 def test_function_code_single_input_decimal_score(self): @@ -1808,7 +1734,8 @@ def test_function_code_single_input_decimal_score(self): # The function should return a dict of the form # { 'ok': BOOL or STRING, 'msg': STRING, 'grade_decimal': FLOAT } # - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): partial_credit = '21' if answer_given == expect: @@ -1825,54 +1752,57 @@ def check_func(expect, answer_given): 'msg': 'Message text', 'grade_decimal': score, } - """) + """ + ) problem = self.build_problem(script=script, cfn="check_func", expect="42") # Correct answer - input_dict = {'1_2_1': '42'} + input_dict = {"1_2_1": "42"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_npoints('1_2_1') == 0.9 - assert correct_map.get_correctness('1_2_1') == 'correct' + assert correct_map.get_npoints("1_2_1") == 0.9 + assert correct_map.get_correctness("1_2_1") == "correct" # Incorrect answer - input_dict = {'1_2_1': '43'} + input_dict = {"1_2_1": "43"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_npoints('1_2_1') == 0.1 - assert correct_map.get_correctness('1_2_1') == 'incorrect' + assert correct_map.get_npoints("1_2_1") == 0.1 + assert correct_map.get_correctness("1_2_1") == "incorrect" # Partially Correct answer - input_dict = {'1_2_1': '21'} + input_dict = {"1_2_1": "21"} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_npoints('1_2_1') == 0.5 - assert correct_map.get_correctness('1_2_1') == 'partially-correct' + assert correct_map.get_npoints("1_2_1") == 0.5 + assert correct_map.get_correctness("1_2_1") == "partially-correct" def test_script_context(self): # Ensure that python script variables can be used in the "expect" and "answer" fields, - script = script = textwrap.dedent(""" + script = script = textwrap.dedent( + """ expected_ans = 42 def check_func(expect, answer_given): return answer_given == expect - """) + """ + ) problems = ( self.build_problem(script=script, cfn="check_func", expect="$expected_ans"), - self.build_problem(script=script, cfn="check_func", answer_attr="$expected_ans") + self.build_problem(script=script, cfn="check_func", answer_attr="$expected_ans"), ) - input_dict = {'1_2_1': '42'} + input_dict = {"1_2_1": "42"} for problem in problems: correctmap = problem.grade_answers(input_dict) # CustomResponse also adds 'expect' to the problem context; check that directly first: - assert problem.context['expect'] == '42' + assert problem.context["expect"] == "42" # Also make sure the problem was graded correctly: - correctness = correctmap.get_correctness('1_2_1') - assert correctness == 'correct' + correctness = correctmap.get_correctness("1_2_1") + assert correctness == "correct" def test_function_code_multiple_input_no_msg(self): @@ -1881,7 +1811,8 @@ def test_function_code_multiple_input_no_msg(self): # If true, mark all the inputs correct # If one is true but not the other, mark all partially correct # If false, mark all the inputs incorrect - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): if answer_given[0] == expect and answer_given[1] == expect: retval = True @@ -1890,42 +1821,42 @@ def check_func(expect, answer_given): else: retval = False return retval - """) + """ + ) - problem = 
self.build_problem(script=script, cfn="check_func", - expect="42", num_inputs=2) + problem = self.build_problem(script=script, cfn="check_func", expect="42", num_inputs=2) # Correct answer -- expect both inputs marked correct - input_dict = {'1_2_1': '42', '1_2_2': '42'} + input_dict = {"1_2_1": "42", "1_2_2": "42"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - assert correctness == 'correct' + correctness = correct_map.get_correctness("1_2_1") + assert correctness == "correct" - correctness = correct_map.get_correctness('1_2_2') - assert correctness == 'correct' + correctness = correct_map.get_correctness("1_2_2") + assert correctness == "correct" # One answer incorrect -- expect both inputs marked partially correct - input_dict = {'1_2_1': '0', '1_2_2': '42'} + input_dict = {"1_2_1": "0", "1_2_2": "42"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - assert correctness == 'partially-correct' - assert 0 <= correct_map.get_npoints('1_2_1') <= 1 + correctness = correct_map.get_correctness("1_2_1") + assert correctness == "partially-correct" + assert 0 <= correct_map.get_npoints("1_2_1") <= 1 - correctness = correct_map.get_correctness('1_2_2') - assert correctness == 'partially-correct' - assert 0 <= correct_map.get_npoints('1_2_2') <= 1 + correctness = correct_map.get_correctness("1_2_2") + assert correctness == "partially-correct" + assert 0 <= correct_map.get_npoints("1_2_2") <= 1 # Both answers incorrect -- expect both inputs marked incorrect - input_dict = {'1_2_1': '0', '1_2_2': '0'} + input_dict = {"1_2_1": "0", "1_2_2": "0"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - assert correctness == 'incorrect' + correctness = correct_map.get_correctness("1_2_1") + assert correctness == "incorrect" - correctness = correct_map.get_correctness('1_2_2') - assert correctness == 'incorrect' + correctness = correct_map.get_correctness("1_2_2") + assert correctness == "incorrect" def test_function_code_multiple_inputs(self): @@ -1940,7 +1871,8 @@ def test_function_code_multiple_inputs(self): # # 'input_list' contains dictionaries representing the correctness # and message for each input. 
- script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): check1 = (int(answer_given[0]) == 1) check2 = (int(answer_given[1]) == 2) @@ -1952,38 +1884,35 @@ def check_func(expect, answer_given): {'ok': check2, 'msg': 'Feedback 2'}, {'ok': check3, 'msg': 'Feedback 3'}, {'ok': check4, 'msg': 'Feedback 4'} ] } - """) - - problem = self.build_problem( - script=script, - cfn="check_func", - num_inputs=4 + """ ) + problem = self.build_problem(script=script, cfn="check_func", num_inputs=4) + # Grade the inputs (one input incorrect) - input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'} + input_dict = {"1_2_1": "-999", "1_2_2": "2", "1_2_3": "3", "1_2_4": "four"} correct_map = problem.grade_answers(input_dict) # Expect that we receive the overall message (for the whole response) - assert correct_map.get_overall_message() == 'Overall message' + assert correct_map.get_overall_message() == "Overall message" # Expect that the inputs were graded individually - assert correct_map.get_correctness('1_2_1') == 'incorrect' - assert correct_map.get_correctness('1_2_2') == 'correct' - assert correct_map.get_correctness('1_2_3') == 'correct' - assert correct_map.get_correctness('1_2_4') == 'partially-correct' + assert correct_map.get_correctness("1_2_1") == "incorrect" + assert correct_map.get_correctness("1_2_2") == "correct" + assert correct_map.get_correctness("1_2_3") == "correct" + assert correct_map.get_correctness("1_2_4") == "partially-correct" # Expect that the inputs were given correct npoints - assert correct_map.get_npoints('1_2_1') == 0 - assert correct_map.get_npoints('1_2_2') == 1 - assert correct_map.get_npoints('1_2_3') == 1 - assert 0 <= correct_map.get_npoints('1_2_4') <= 1 + assert correct_map.get_npoints("1_2_1") == 0 + assert correct_map.get_npoints("1_2_2") == 1 + assert correct_map.get_npoints("1_2_3") == 1 + assert 0 <= correct_map.get_npoints("1_2_4") <= 1 # Expect that we received messages for each individual input - assert correct_map.get_msg('1_2_1') == 'Feedback 1' - assert correct_map.get_msg('1_2_2') == 'Feedback 2' - assert correct_map.get_msg('1_2_3') == 'Feedback 3' - assert correct_map.get_msg('1_2_4') == 'Feedback 4' + assert correct_map.get_msg("1_2_1") == "Feedback 1" + assert correct_map.get_msg("1_2_2") == "Feedback 2" + assert correct_map.get_msg("1_2_3") == "Feedback 3" + assert correct_map.get_msg("1_2_4") == "Feedback 4" def test_function_code_multiple_inputs_decimal_score(self): @@ -1996,7 +1925,8 @@ def test_function_code_multiple_inputs_decimal_score(self): # # # 'input_list' contains dictionaries representing the correctness # and message for each input. 
- script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): check1 = (int(answer_given[0]) == 1) check2 = (int(answer_given[1]) == 2) @@ -2014,28 +1944,30 @@ def check_func(expect, answer_given): {'ok': check4, 'grade_decimal': score4, 'msg': 'Feedback 4'}, ] } - """) + """ + ) problem = self.build_problem(script=script, cfn="check_func", num_inputs=4) # Grade the inputs (one input incorrect) - input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3', '1_2_4': 'four'} + input_dict = {"1_2_1": "-999", "1_2_2": "2", "1_2_3": "3", "1_2_4": "four"} correct_map = problem.grade_answers(input_dict) # Expect that the inputs were graded individually - assert correct_map.get_correctness('1_2_1') == 'incorrect' - assert correct_map.get_correctness('1_2_2') == 'correct' - assert correct_map.get_correctness('1_2_3') == 'correct' - assert correct_map.get_correctness('1_2_4') == 'partially-correct' + assert correct_map.get_correctness("1_2_1") == "incorrect" + assert correct_map.get_correctness("1_2_2") == "correct" + assert correct_map.get_correctness("1_2_3") == "correct" + assert correct_map.get_correctness("1_2_4") == "partially-correct" # Expect that the inputs were given correct npoints - assert correct_map.get_npoints('1_2_1') == 0.1 - assert correct_map.get_npoints('1_2_2') == 0.9 - assert correct_map.get_npoints('1_2_3') == 0.9 - assert correct_map.get_npoints('1_2_4') == 0.7 + assert correct_map.get_npoints("1_2_1") == 0.1 + assert correct_map.get_npoints("1_2_2") == 0.9 + assert correct_map.get_npoints("1_2_3") == 0.9 + assert correct_map.get_npoints("1_2_4") == 0.7 def test_function_code_with_extra_args(self): - script = textwrap.dedent("""\ + script = textwrap.dedent( + """\ def check_func(expect, answer_given, options, dynamath): assert options == "xyzzy", "Options was %r" % options partial_credit = '21' @@ -2046,48 +1978,46 @@ def check_func(expect, answer_given, options, dynamath): else: retval = False return {'ok': retval, 'msg': 'Message text'} - """) + """ + ) problem = self.build_problem( - script=script, - cfn="check_func", - expect="42", - options="xyzzy", - cfn_extra_args="options dynamath" + script=script, cfn="check_func", expect="42", options="xyzzy", cfn_extra_args="options dynamath" ) # Correct answer - input_dict = {'1_2_1': '42'} + input_dict = {"1_2_1": "42"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") - assert correctness == 'correct' - assert msg == 'Message text' + assert correctness == "correct" + assert msg == "Message text" # Partially Correct answer - input_dict = {'1_2_1': '21'} + input_dict = {"1_2_1": "21"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") - assert correctness == 'partially-correct' - assert msg == 'Message text' + assert correctness == "partially-correct" + assert msg == "Message text" # Incorrect answer - input_dict = {'1_2_1': '0'} + input_dict = {"1_2_1": "0"} correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") - assert correctness == 'incorrect' - assert msg == 'Message 
text' + assert correctness == "incorrect" + assert msg == "Message text" def test_function_code_with_attempt_number(self): - script = textwrap.dedent("""\ + script = textwrap.dedent( + """\ def gradeit(expect, ans, **kwargs): attempt = kwargs["attempt"] message = "This is attempt number {}".format(str(attempt)) @@ -2096,35 +2026,31 @@ def gradeit(expect, ans, **kwargs): { 'ok': True, 'msg': message}, ] } - """) - - problem = self.build_problem( - script=script, - cfn="gradeit", - expect="42", - cfn_extra_args="attempt" + """ ) + problem = self.build_problem(script=script, cfn="gradeit", expect="42", cfn_extra_args="attempt") + # first attempt - input_dict = {'1_2_1': '42'} - problem.context['attempt'] = 1 + input_dict = {"1_2_1": "42"} + problem.context["attempt"] = 1 correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") - assert correctness == 'correct' - assert msg == 'This is attempt number 1' + assert correctness == "correct" + assert msg == "This is attempt number 1" # second attempt - problem.context['attempt'] = 2 + problem.context["attempt"] = 2 correct_map = problem.grade_answers(input_dict) - correctness = correct_map.get_correctness('1_2_1') - msg = correct_map.get_msg('1_2_1') + correctness = correct_map.get_correctness("1_2_1") + msg = correct_map.get_msg("1_2_1") - assert correctness == 'correct' - assert msg == 'This is attempt number 2' + assert correctness == "correct" + assert msg == "This is attempt number 2" def test_multiple_inputs_return_one_status(self): # When given multiple inputs, the 'answer_given' argument @@ -2137,7 +2063,8 @@ def test_multiple_inputs_return_one_status(self): # Since we return a dict describing the status of one input, # we expect that the same 'ok' value is applied to each # of the inputs. 
- script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): check1 = (int(answer_given[0]) == 1) check2 = (int(answer_given[1]) == 2) @@ -2148,54 +2075,56 @@ def check_func(expect, answer_given): else: return {'ok': (check1 and check2 and check3), 'msg': 'Message text'} - """) + """ + ) - problem = self.build_problem(script=script, - cfn="check_func", num_inputs=3) + problem = self.build_problem(script=script, cfn="check_func", num_inputs=3) # Grade the inputs (one input incorrect) - input_dict = {'1_2_1': '-999', '1_2_2': '2', '1_2_3': '3'} + input_dict = {"1_2_1": "-999", "1_2_2": "2", "1_2_3": "3"} correct_map = problem.grade_answers(input_dict) # Everything marked incorrect - assert correct_map.get_correctness('1_2_1') == 'incorrect' - assert correct_map.get_correctness('1_2_2') == 'incorrect' - assert correct_map.get_correctness('1_2_3') == 'incorrect' + assert correct_map.get_correctness("1_2_1") == "incorrect" + assert correct_map.get_correctness("1_2_2") == "incorrect" + assert correct_map.get_correctness("1_2_3") == "incorrect" # Grade the inputs (one input partially correct) - input_dict = {'1_2_1': '-1', '1_2_2': '2', '1_2_3': '3'} + input_dict = {"1_2_1": "-1", "1_2_2": "2", "1_2_3": "3"} correct_map = problem.grade_answers(input_dict) # Everything marked partially correct - assert correct_map.get_correctness('1_2_1') == 'partially-correct' - assert correct_map.get_correctness('1_2_2') == 'partially-correct' - assert correct_map.get_correctness('1_2_3') == 'partially-correct' + assert correct_map.get_correctness("1_2_1") == "partially-correct" + assert correct_map.get_correctness("1_2_2") == "partially-correct" + assert correct_map.get_correctness("1_2_3") == "partially-correct" # Grade the inputs (everything correct) - input_dict = {'1_2_1': '1', '1_2_2': '2', '1_2_3': '3'} + input_dict = {"1_2_1": "1", "1_2_2": "2", "1_2_3": "3"} correct_map = problem.grade_answers(input_dict) # Everything marked incorrect - assert correct_map.get_correctness('1_2_1') == 'correct' - assert correct_map.get_correctness('1_2_2') == 'correct' - assert correct_map.get_correctness('1_2_3') == 'correct' + assert correct_map.get_correctness("1_2_1") == "correct" + assert correct_map.get_correctness("1_2_2") == "correct" + assert correct_map.get_correctness("1_2_3") == "correct" # Message is interpreted as an "overall message" - assert correct_map.get_overall_message() == 'Message text' + assert correct_map.get_overall_message() == "Message text" def test_script_exception_function(self): # Construct a script that will raise an exception - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): raise Exception("Test") - """) + """ + ) problem = self.build_problem(script=script, cfn="check_func") # Expect that an exception gets raised when we check the answer with pytest.raises(ResponseError): - problem.grade_answers({'1_2_1': '42'}) + problem.grade_answers({"1_2_1": "42"}) def test_script_exception_inline(self): @@ -2205,41 +2134,51 @@ def test_script_exception_inline(self): # Expect that an exception gets raised when we check the answer with pytest.raises(ResponseError): - problem.grade_answers({'1_2_1': '42'}) + problem.grade_answers({"1_2_1": "42"}) def test_invalid_dict_exception(self): # Construct a script that passes back an invalid dict format - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): return {'invalid': 'test'} - """) + """ + ) problem = 
self.build_problem(script=script, cfn="check_func") # Expect that an exception gets raised when we check the answer with pytest.raises(ResponseError): - problem.grade_answers({'1_2_1': '42'}) + problem.grade_answers({"1_2_1": "42"}) def test_setup_randomization(self): # Ensure that the problem setup script gets the random seed from the problem. - script = textwrap.dedent(""" + script = textwrap.dedent( + """ num = {code} - """.format(code=self._get_random_number_code())) + """.format( + code=self._get_random_number_code() + ) + ) problem = self.build_problem(script=script) - assert problem.context['num'] == self._get_random_number_result(problem.seed) + assert problem.context["num"] == self._get_random_number_result(problem.seed) def test_check_function_randomization(self): # The check function should get random-seeded from the problem. - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): return {{'ok': True, 'msg': {code} }} - """.format(code=self._get_random_number_code())) + """.format( + code=self._get_random_number_code() + ) + ) problem = self.build_problem(script=script, cfn="check_func", expect="42") - input_dict = {'1_2_1': '42'} + input_dict = {"1_2_1": "42"} correct_map = problem.grade_answers(input_dict) - msg = correct_map.get_msg('1_2_1') + msg = correct_map.get_msg("1_2_1") assert msg == self._get_random_number_result(problem.seed) def test_random_isnt_none(self): @@ -2253,29 +2192,43 @@ def test_random_isnt_none(self): r.seed(10) num = r.randint(0, 1e9) - script = textwrap.dedent(""" + script = textwrap.dedent( + """ random.seed(10) num = random.randint(0, 1e9) - """) + """ + ) problem = self.build_problem(script=script) - assert problem.context['num'] == num + assert problem.context["num"] == num def test_module_imports_inline(self): - ''' + """ Check that the correct modules are available to custom response scripts - ''' + """ - for module_name in ['random', 'numpy', 'math', 'scipy', - 'calc', 'eia', 'chemcalc', 'chemtools', - 'miller', 'draganddrop']: + for module_name in [ + "random", + "numpy", + "math", + "scipy", + "calc", + "eia", + "chemcalc", + "chemtools", + "miller", + "draganddrop", + ]: # Create a script that checks that the name is defined # If the name is not defined, then the script # will raise an exception - script = textwrap.dedent(''' + script = textwrap.dedent( + """ correct[0] = 'correct' - assert('%s' in globals())''' % module_name) + assert('%s' in globals())""" + % module_name + ) # Create the problem problem = self.build_problem(answer=script) @@ -2283,28 +2236,40 @@ def test_module_imports_inline(self): # Expect that we can grade an answer without # getting an exception try: - problem.grade_answers({'1_2_1': '42'}) + problem.grade_answers({"1_2_1": "42"}) except ResponseError: self.fail("Could not use name '{0}s' in custom response".format(module_name)) def test_module_imports_function(self): - ''' + """ Check that the correct modules are available to custom response scripts - ''' + """ - for module_name in ['random', 'numpy', 'math', 'scipy', - 'calc', 'eia', 'chemcalc', 'chemtools', - 'miller', 'draganddrop']: + for module_name in [ + "random", + "numpy", + "math", + "scipy", + "calc", + "eia", + "chemcalc", + "chemtools", + "miller", + "draganddrop", + ]: # Create a script that checks that the name is defined # If the name is not defined, then the script # will raise an exception - script = textwrap.dedent(''' + script = textwrap.dedent( + """ def check_func(expect, answer_given): assert('%s' 
in globals()) - return True''' % module_name) + return True""" + % module_name + ) # Create the problem problem = self.build_problem(script=script, cfn="check_func") @@ -2312,7 +2277,7 @@ def check_func(expect, answer_given): # Expect that we can grade an answer without # getting an exception try: - problem.grade_answers({'1_2_1': '42'}) + problem.grade_answers({"1_2_1": "42"}) except ResponseError: self.fail("Could not use name '{0}s' in custom response".format(module_name)) @@ -2323,25 +2288,35 @@ def test_python_lib_zip_is_available(self): # Make a zipfile with one module in it with one function. zipstring = io.BytesIO() zipf = zipfile.ZipFile(zipstring, "w") # lint-amnesty, pylint: disable=consider-using-with - zipf.writestr("my_helper.py", textwrap.dedent("""\ + zipf.writestr( + "my_helper.py", + textwrap.dedent( + """\ def seventeen(): return 17 - """)) + """ + ), + ) zipf.close() # Use that module in our Python script. - script = textwrap.dedent(""" + script = textwrap.dedent( + """ import my_helper num = my_helper.seventeen() - """) + """ + ) capa_system = mock_capa_system() - capa_system.get_python_lib_zip = lambda: zipstring.getvalue() # lint-amnesty, pylint: disable=unnecessary-lambda + capa_system.get_python_lib_zip = ( + lambda: zipstring.getvalue() # lint-amnesty, pylint: disable=unnecessary-lambda + ) problem = self.build_problem(script=script, capa_system=capa_system) - assert problem.context['num'] == 17 + assert problem.context["num"] == 17 def test_function_code_multiple_inputs_order(self): # Ensure that order must be correct according to sub-problem position - script = textwrap.dedent(""" + script = textwrap.dedent( + """ def check_func(expect, answer_given): check1 = (int(answer_given[0]) == 1) check2 = (int(answer_given[1]) == 2) @@ -2368,27 +2343,38 @@ def check_func(expect, answer_given): { 'ok': check10, 'msg': '10'}, { 'ok': check11, 'msg': '11'}, ]} - """) + """ + ) problem = self.build_problem(script=script, cfn="check_func", num_inputs=11) # Grade the inputs showing out of order input_dict = { - '1_2_1': '1', - '1_2_2': '2', - '1_2_3': '3', - '1_2_4': '4', - '1_2_5': '5', - '1_2_6': '6', - '1_2_10': '10', - '1_2_11': '16', - '1_2_7': '7', - '1_2_8': '8', - '1_2_9': '9' + "1_2_1": "1", + "1_2_2": "2", + "1_2_3": "3", + "1_2_4": "4", + "1_2_5": "5", + "1_2_6": "6", + "1_2_10": "10", + "1_2_11": "16", + "1_2_7": "7", + "1_2_8": "8", + "1_2_9": "9", } correct_order = [ - '1_2_1', '1_2_2', '1_2_3', '1_2_4', '1_2_5', '1_2_6', '1_2_7', '1_2_8', '1_2_9', '1_2_10', '1_2_11' + "1_2_1", + "1_2_2", + "1_2_3", + "1_2_4", + "1_2_5", + "1_2_6", + "1_2_7", + "1_2_8", + "1_2_9", + "1_2_10", + "1_2_11", ] correct_map = problem.grade_answers(input_dict) @@ -2396,15 +2382,15 @@ def check_func(expect, answer_given): assert list(problem.student_answers.keys()) != correct_order # euqal to correct order after sorting at get_score - self.assertListEqual(list(problem.responders.values())[0].context['idset'], correct_order) + self.assertListEqual(list(problem.responders.values())[0].context["idset"], correct_order) - assert correct_map.get_correctness('1_2_1') == 'correct' - assert correct_map.get_correctness('1_2_9') == 'correct' - assert correct_map.get_correctness('1_2_11') == 'incorrect' + assert correct_map.get_correctness("1_2_1") == "correct" + assert correct_map.get_correctness("1_2_9") == "correct" + assert correct_map.get_correctness("1_2_11") == "incorrect" - assert correct_map.get_msg('1_2_1') == '1' - assert correct_map.get_msg('1_2_9') == '9' - assert 
correct_map.get_msg('1_2_11') == '11' + assert correct_map.get_msg("1_2_1") == "1" + assert correct_map.get_msg("1_2_9") == "9" + assert correct_map.get_msg("1_2_11") == "11" @use_unsafe_codejail() @@ -2412,6 +2398,7 @@ class SchematicResponseTest(ResponseTest): """ Class containing setup and tests for Schematic responsetype. """ + xml_factory_class = SchematicResponseXMLFactory def test_grade(self): @@ -2429,25 +2416,27 @@ def test_grade(self): # The actual dictionary would contain schematic information # sent from the JavaScript simulation - submission_dict = {'test': 'the_answer'} - input_dict = {'1_2_1': json.dumps(submission_dict)} + submission_dict = {"test": "the_answer"} + input_dict = {"1_2_1": json.dumps(submission_dict)} correct_map = problem.grade_answers(input_dict) # Expect that the problem is graded as true # (That is, our script verifies that the context # is what we expect) - assert correct_map.get_correctness('1_2_1') == 'correct' + assert correct_map.get_correctness("1_2_1") == "correct" def test_check_function_randomization(self): # The check function should get a random seed from the problem. - script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format(code=self._get_random_number_code()) # lint-amnesty, pylint: disable=line-too-long + script = "correct = ['correct' if (submission[0]['num'] == {code}) else 'incorrect']".format( + code=self._get_random_number_code() + ) # lint-amnesty, pylint: disable=line-too-long problem = self.build_problem(answer=script) - submission_dict = {'num': self._get_random_number_result(problem.seed)} - input_dict = {'1_2_1': json.dumps(submission_dict)} + submission_dict = {"num": self._get_random_number_result(problem.seed)} + input_dict = {"1_2_1": json.dumps(submission_dict)} correct_map = problem.grade_answers(input_dict) - assert correct_map.get_correctness('1_2_1') == 'correct' + assert correct_map.get_correctness("1_2_1") == "correct" def test_script_exception(self): # Construct a script that will raise an exception @@ -2456,8 +2445,8 @@ def test_script_exception(self): # Expect that an exception gets raised when we check the answer with pytest.raises(ResponseError): - submission_dict = {'test': 'test'} - input_dict = {'1_2_1': json.dumps(submission_dict)} + submission_dict = {"test": "test"} + input_dict = {"1_2_1": json.dumps(submission_dict)} problem.grade_answers(input_dict) @@ -2465,36 +2454,38 @@ class AnnotationResponseTest(ResponseTest): # lint-amnesty, pylint: disable=mis xml_factory_class = AnnotationResponseXMLFactory def test_grade(self): - (correct, partially, incorrect) = ('correct', 'partially-correct', 'incorrect') + (correct, partially, incorrect) = ("correct", "partially-correct", "incorrect") - answer_id = '1_2_1' - options = (('x', correct), ('y', partially), ('z', incorrect)) - make_answer = lambda option_ids: {answer_id: json.dumps({'options': option_ids})} + answer_id = "1_2_1" + options = (("x", correct), ("y", partially), ("z", incorrect)) + make_answer = lambda option_ids: {answer_id: json.dumps({"options": option_ids})} tests = [ - {'correctness': correct, 'points': 2, 'answers': make_answer([0])}, - {'correctness': partially, 'points': 1, 'answers': make_answer([1])}, - {'correctness': incorrect, 'points': 0, 'answers': make_answer([2])}, - {'correctness': incorrect, 'points': 0, 'answers': make_answer([0, 1, 2])}, - {'correctness': incorrect, 'points': 0, 'answers': make_answer([])}, - {'correctness': incorrect, 'points': 0, 'answers': make_answer('')}, - {'correctness': 
incorrect, 'points': 0, 'answers': make_answer(None)}, - {'correctness': incorrect, 'points': 0, 'answers': {answer_id: 'null'}}, + {"correctness": correct, "points": 2, "answers": make_answer([0])}, + {"correctness": partially, "points": 1, "answers": make_answer([1])}, + {"correctness": incorrect, "points": 0, "answers": make_answer([2])}, + {"correctness": incorrect, "points": 0, "answers": make_answer([0, 1, 2])}, + {"correctness": incorrect, "points": 0, "answers": make_answer([])}, + {"correctness": incorrect, "points": 0, "answers": make_answer("")}, + {"correctness": incorrect, "points": 0, "answers": make_answer(None)}, + {"correctness": incorrect, "points": 0, "answers": {answer_id: "null"}}, ] for test in tests: - expected_correctness = test['correctness'] - expected_points = test['points'] - answers = test['answers'] + expected_correctness = test["correctness"] + expected_points = test["points"] + answers = test["answers"] problem = self.build_problem(options=options) correct_map = problem.grade_answers(answers) actual_correctness = correct_map.get_correctness(answer_id) actual_points = correct_map.get_npoints(answer_id) - assert expected_correctness == actual_correctness,\ - ('%s should be marked %s' % (answer_id, expected_correctness)) - assert expected_points == actual_points, ('%s should have %d points' % (answer_id, expected_points)) + assert expected_correctness == actual_correctness, "%s should be marked %s" % ( + answer_id, + expected_correctness, + ) + assert expected_points == actual_points, "%s should have %d points" % (answer_id, expected_points) @use_unsafe_codejail() @@ -2535,7 +2526,7 @@ class ChoiceTextResponseTest(ResponseTest): "2_choices_1_input_2_incorrect": [(False, []), (True, ["123"])], "2_choices_2_inputs_correct": [(True, ["123"]), (False, [])], "2_choices_2_inputs_wrong_choice": [(False, ["123"]), (True, [])], - "2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])] + "2_choices_2_inputs_wrong_input": [(True, ["321"]), (False, [])], } # `TEST_SCENARIOS` is a dictionary of the form @@ -2570,68 +2561,43 @@ class ChoiceTextResponseTest(ResponseTest): "2_choices_1_input_2_incorrect": ("2_choices_1_input_2", "incorrect"), "2_choices_2_inputs_correct": ("2_choices_2_inputs", "correct"), "2_choices_2_inputs_wrong_choice": ("2_choices_2_inputs", "incorrect"), - "2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect") + "2_choices_2_inputs_wrong_input": ("2_choices_2_inputs", "incorrect"), } # Dictionary that maps from problem_name to arguments for # _make_problem, that will create the problem. 
TEST_PROBLEM_ARGS = { - "1_choice_0_input": {"choices": ("true", {}), "script": ''}, - "1_choice_1_input": { - "choices": ("true", {"answer": "123", "tolerance": "1"}), - "script": '' - }, - + "1_choice_0_input": {"choices": ("true", {}), "script": ""}, + "1_choice_1_input": {"choices": ("true", {"answer": "123", "tolerance": "1"}), "script": ""}, "1_input_script": { "choices": ("true", {"answer": "$computed_response", "tolerance": "1"}), - "script": "computed_response = math.sqrt(4)" + "script": "computed_response = math.sqrt(4)", }, - "1_choice_2_inputs": { - "choices": [ - ( - "true", ( - {"answer": "123", "tolerance": "1"}, - {"answer": "456", "tolerance": "10"} - ) - ) - ], - "script": '' - }, - "2_choices_0_inputs": { - "choices": [("false", {}), ("true", {})], - "script": '' - + "choices": [("true", ({"answer": "123", "tolerance": "1"}, {"answer": "456", "tolerance": "10"}))], + "script": "", }, + "2_choices_0_inputs": {"choices": [("false", {}), ("true", {})], "script": ""}, "2_choices_1_input_1": { - "choices": [ - ("false", {}), ("true", {"answer": "123", "tolerance": "0"}) - ], - "script": '' + "choices": [("false", {}), ("true", {"answer": "123", "tolerance": "0"})], + "script": "", }, "2_choices_1_input_2": { "choices": [("true", {}), ("false", {"answer": "123", "tolerance": "0"})], - "script": '' + "script": "", }, "2_choices_2_inputs": { - "choices": [ - ("true", {"answer": "123", "tolerance": "0"}), - ("false", {"answer": "999", "tolerance": "0"}) - ], - "script": '' - } + "choices": [("true", {"answer": "123", "tolerance": "0"}), ("false", {"answer": "999", "tolerance": "0"})], + "script": "", + }, } - def _make_problem(self, choices, in_type='radiotextgroup', script=''): + def _make_problem(self, choices, in_type="radiotextgroup", script=""): """ Convenience method to fill in default values for script and type if needed, then call self.build_problem """ - return self.build_problem( - choices=choices, - type=in_type, - script=script - ) + return self.build_problem(choices=choices, type=in_type, script=script) def _make_answer_dict(self, choice_list): """ @@ -2658,10 +2624,7 @@ def _make_answer_dict(self, choice_list): # In `answer_id` `index` represents the ordinality of the # choice and `ind` represents the ordinality of the # numtolerance_input inside the parent choice. - answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format( - index=index, - ind=ind - ) + answer_id = "1_2_1_choiceinput_{index}_numtolerance_input_{ind}".format(index=index, ind=ind) answer_dict[answer_id] = answer return answer_dict @@ -2681,17 +2644,11 @@ def test_unchecked_input_not_validated(self): """ two_choice_two_input = self._make_problem( - [ - ("true", {"answer": "123", "tolerance": "1"}), - ("false", {}) - ], - "checkboxtextgroup" + [("true", {"answer": "123", "tolerance": "1"}), ("false", {})], "checkboxtextgroup" ) self.assert_grade( - two_choice_two_input, - self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]), - "incorrect" + two_choice_two_input, self._make_answer_dict([(True, ["1"]), (False, ["Platypus"])]), "incorrect" ) def test_interpret_error(self): @@ -2700,47 +2657,26 @@ def test_interpret_error(self): cause the response type to raise an error. 
""" two_choice_two_input = self._make_problem( - [ - ("true", {"answer": "123", "tolerance": "1"}), - ("false", {}) - ], - "checkboxtextgroup" + [("true", {"answer": "123", "tolerance": "1"}), ("false", {})], "checkboxtextgroup" ) with self.assertRaisesRegex(StudentInputError, "Could not interpret"): # Test that error is raised for input in selected correct choice. - self.assert_grade( - two_choice_two_input, - self._make_answer_dict([(True, ["Platypus"])]), - "correct" - ) + self.assert_grade(two_choice_two_input, self._make_answer_dict([(True, ["Platypus"])]), "correct") with self.assertRaisesRegex(StudentInputError, "Could not interpret"): # Test that error is raised for input in selected incorrect choice. self.assert_grade( - two_choice_two_input, - self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]), - "correct" + two_choice_two_input, self._make_answer_dict([(True, ["1"]), (True, ["Platypus"])]), "correct" ) def test_staff_answer_error(self): broken_problem = self._make_problem( - [("true", {"answer": "Platypus", "tolerance": "0"}), - ("true", {"answer": "edX", "tolerance": "0"}) - ], - "checkboxtextgroup" + [("true", {"answer": "Platypus", "tolerance": "0"}), ("true", {"answer": "edX", "tolerance": "0"})], + "checkboxtextgroup", ) - with self.assertRaisesRegex( - StudentInputError, - "The Staff answer could not be interpreted as a number." - ): - self.assert_grade( - broken_problem, - self._make_answer_dict( - [(True, ["1"]), (True, ["1"])] - ), - "correct" - ) + with self.assertRaisesRegex(StudentInputError, "The Staff answer could not be interpreted as a number."): + self.assert_grade(broken_problem, self._make_answer_dict([(True, ["1"]), (True, ["1"])]), "correct") def test_radio_grades(self): """ @@ -2759,17 +2695,9 @@ def test_radio_grades(self): test_choices = problem_args["choices"] test_script = problem_args["script"] # Build the actual problem for the test. - test_problem = self._make_problem(test_choices, 'radiotextgroup', test_script) + test_problem = self._make_problem(test_choices, "radiotextgroup", test_script) # Make sure the actual grade matches the expected grade. 
- self.assert_grade( - test_problem, - submission, - correctness, - msg="{0} should be {1}".format( - name, - correctness - ) - ) + self.assert_grade(test_problem, submission, correctness, msg="{0} should be {1}".format(name, correctness)) def test_checkbox_grades(self): """ @@ -2781,51 +2709,31 @@ def test_checkbox_grades(self): scenarios = { "2_choices_correct": ("checkbox_two_choices", "correct"), "2_choices_incorrect": ("checkbox_two_choices", "incorrect"), - - "2_choices_2_inputs_correct": ( - "checkbox_2_choices_2_inputs", - "correct" - ), - - "2_choices_2_inputs_missing_choice": ( - "checkbox_2_choices_2_inputs", - "incorrect" - ), - - "2_choices_2_inputs_wrong_input": ( - "checkbox_2_choices_2_inputs", - "incorrect" - ) + "2_choices_2_inputs_correct": ("checkbox_2_choices_2_inputs", "correct"), + "2_choices_2_inputs_missing_choice": ("checkbox_2_choices_2_inputs", "incorrect"), + "2_choices_2_inputs_wrong_input": ("checkbox_2_choices_2_inputs", "incorrect"), } # Dictionary scenario_name: test_inputs inputs = { "2_choices_correct": [(True, []), (True, [])], "2_choices_incorrect": [(True, []), (False, [])], "2_choices_2_inputs_correct": [(True, ["123"]), (True, ["456"])], - "2_choices_2_inputs_missing_choice": [ - (True, ["123"]), (False, ["456"]) - ], - "2_choices_2_inputs_wrong_input": [ - (True, ["123"]), (True, ["654"]) - ] + "2_choices_2_inputs_missing_choice": [(True, ["123"]), (False, ["456"])], + "2_choices_2_inputs_wrong_input": [(True, ["123"]), (True, ["654"])], } # Two choice zero input problem with both choices being correct. - checkbox_two_choices = self._make_problem( - [("true", {}), ("true", {})], "checkboxtextgroup" - ) + checkbox_two_choices = self._make_problem([("true", {}), ("true", {})], "checkboxtextgroup") # Two choice two input problem with both choices correct. 
checkbox_two_choices_two_inputs = self._make_problem( - [("true", {"answer": "123", "tolerance": "0"}), - ("true", {"answer": "456", "tolerance": "0"}) - ], - "checkboxtextgroup" + [("true", {"answer": "123", "tolerance": "0"}), ("true", {"answer": "456", "tolerance": "0"})], + "checkboxtextgroup", ) # Dictionary problem_name: problem problems = { "checkbox_two_choices": checkbox_two_choices, - "checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs + "checkbox_2_choices_2_inputs": checkbox_two_choices_two_inputs, } for name, inputs in inputs.items(): @@ -2836,9 +2744,4 @@ def test_checkbox_grades(self): problem = problems[problem_name] # Make sure the actual grade matches the expected grade - self.assert_grade( - problem, - submission, - correctness, - msg="{0} should be {1}".format(name, correctness) - ) + self.assert_grade(problem, submission, correctness, msg="{0} should be {1}".format(name, correctness)) diff --git a/xmodule/capa/tests/test_shuffle.py b/xmodule/capa/tests/test_shuffle.py index d7ce39f00898..516cbddb0c63 100644 --- a/xmodule/capa/tests/test_shuffle.py +++ b/xmodule/capa/tests/test_shuffle.py @@ -1,11 +1,10 @@ """Tests the capa shuffle and name-masking.""" - import textwrap import unittest from xmodule.capa.responsetypes import LoncapaProblemError -from xmodule.capa.tests.helpers import new_loncapa_problem, mock_capa_system +from xmodule.capa.tests.helpers import mock_capa_system, new_loncapa_problem class CapaShuffleTest(unittest.TestCase): @@ -16,7 +15,8 @@ def setUp(self): self.system = mock_capa_system() def test_shuffle_4_choices(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -27,7 +27,8 @@ def test_shuffle_4_choices(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) # shuffling 4 things with seed of 0 yields: B A C D # Check that the choices are shuffled @@ -36,11 +37,12 @@ def test_shuffle_4_choices(self): # Check that choice name masking is enabled and that unmasking works response = list(problem.responders.values())[0] assert not response.has_mask() - assert response.unmask_order() == ['choice_1', 'choice_0', 'choice_2', 'choice_3'] - assert the_html == problem.get_html(), 'should be able to call get_html() twice' + assert response.unmask_order() == ["choice_1", "choice_0", "choice_2", "choice_3"] + assert the_html == problem.get_html(), "should be able to call get_html() twice" def test_shuffle_custom_names(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -51,17 +53,19 @@ def test_shuffle_custom_names(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) # B A C D # Check that the custom name= names come through response = list(problem.responders.values())[0] assert not response.has_mask() assert response.has_shuffle() - assert response.unmask_order() == ['choice_0', 'choice_aaa', 'choice_1', 'choice_ddd'] + assert response.unmask_order() == ["choice_0", "choice_aaa", "choice_1", "choice_ddd"] def test_shuffle_different_seed(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -72,13 +76,15 @@ def test_shuffle_different_seed(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=341) # yields D A B C the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'Donut'.*'Apple'.*'Banana'.*'Chocolate'.*\].*</div>
                    ") def test_shuffle_1_choice(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -86,17 +92,19 @@ def test_shuffle_1_choice(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'Apple'.*\].*</div>
                    ") response = list(problem.responders.values())[0] assert not response.has_mask() assert response.has_shuffle() - assert response.unmask_order() == ['choice_0'] + assert response.unmask_order() == ["choice_0"] def test_shuffle_6_choices(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -109,14 +117,18 @@ def test_shuffle_6_choices(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) # yields: C E A B D F # Donut -> Zonut to show that there is not some hidden alphabetic ordering going on the_html = problem.get_html() - self.assertRegex(the_html, r"
<div>.*\[.*'Chocolate'.*'Eggplant'.*'Apple'.*'Banana'.*'Zonut'.*'Filet Mignon'.*\].*</div>
                    ") # lint-amnesty, pylint: disable=line-too-long + self.assertRegex( + the_html, r"
<div>.*\[.*'Chocolate'.*'Eggplant'.*'Apple'.*'Banana'.*'Zonut'.*'Filet Mignon'.*\].*</div>
                    " + ) # lint-amnesty, pylint: disable=line-too-long def test_shuffle_false(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -127,7 +139,8 @@ def test_shuffle_false(self): - """) + """ + ) problem = new_loncapa_problem(xml_str) the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'Apple'.*'Banana'.*'Chocolate'.*'Donut'.*\].*</div>
                    ") @@ -136,7 +149,8 @@ def test_shuffle_false(self): assert not response.has_shuffle() def test_shuffle_fixed_head_end(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -149,14 +163,16 @@ def test_shuffle_fixed_head_end(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() # Alpha Beta held back from shuffle (head end) self.assertRegex(the_html, r"
<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*\].*</div>
                    ") def test_shuffle_fixed_tail_end(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -169,14 +185,16 @@ def test_shuffle_fixed_tail_end(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() # Alpha Beta held back from shuffle (tail end) self.assertRegex(the_html, r"
<div>.*\[.*'B'.*'A'.*'C'.*'D'.*'Alpha'.*'Beta'.*\].*</div>
                    ") def test_shuffle_fixed_both_ends(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -192,16 +210,15 @@ def test_shuffle_fixed_both_ends(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() - self.assertRegex( - the_html, - r"
<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*'Psi'.*'Omega'.*\].*</div>
                    " - ) + self.assertRegex(the_html, r"
<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*'Psi'.*'Omega'.*\].*</div>
                    ") def test_shuffle_fixed_both_ends_thin(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -211,13 +228,15 @@ def test_shuffle_fixed_both_ends_thin(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'Alpha'.*'A'.*'Omega'.*\].*</div>
                    ") def test_shuffle_fixed_all(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -227,14 +246,16 @@ def test_shuffle_fixed_all(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'A'.*'B'.*'C'.*\].*</div>
                    ") def test_shuffle_island(self): """A fixed 'island' choice not at the head or tail end gets lumped into the tail end.""" - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -246,13 +267,15 @@ def test_shuffle_island(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) the_html = problem.get_html() self.assertRegex(the_html, r"
<div>.*\[.*'A'.*'Mid'.*'Mid'.*'C'.*'D'.*\].*</div>
                    ") def test_multiple_shuffle_responses(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -272,25 +295,30 @@ def test_multiple_shuffle_responses(self): - """) + """ + ) problem = new_loncapa_problem(xml_str, seed=0) orig_html = problem.get_html() - assert orig_html == problem.get_html(), 'should be able to call get_html() twice' - html = orig_html.replace('\n', ' ') # avoid headaches with .* matching + assert orig_html == problem.get_html(), "should be able to call get_html() twice" + html = orig_html.replace("\n", " ") # avoid headaches with .* matching print(html) - self.assertRegex(html, r"
<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>
                    .*" + - r"
<div>.*\[.*'C'.*'A'.*'D'.*'B'.*\].*</div>
                    ") + self.assertRegex( + html, + r"
<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>
                    .*" + + r"
<div>.*\[.*'C'.*'A'.*'D'.*'B'.*\].*</div>
                    ", + ) # Look at the responses in their authored order - responses = sorted(list(problem.responders.values()), key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:])) + responses = sorted(list(problem.responders.values()), key=lambda resp: int(resp.id[resp.id.rindex("_") + 1 :])) assert not responses[0].has_mask() assert responses[0].has_shuffle() assert responses[1].has_shuffle() - assert responses[0].unmask_order() == ['choice_1', 'choice_0', 'choice_2', 'choice_3'] - assert responses[1].unmask_order() == ['choice_2', 'choice_0', 'choice_3', 'choice_1'] + assert responses[0].unmask_order() == ["choice_1", "choice_0", "choice_2", "choice_3"] + assert responses[1].unmask_order() == ["choice_2", "choice_0", "choice_3", "choice_1"] def test_shuffle_not_with_answerpool(self): """Raise error if shuffle and answer-pool are both used.""" - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ @@ -302,7 +330,8 @@ def test_shuffle_not_with_answerpool(self): - """) + """ + ) with self.assertRaisesRegex(LoncapaProblemError, "shuffle and answer-pool"): new_loncapa_problem(xml_str) diff --git a/xmodule/capa/tests/test_targeted_feedback.py b/xmodule/capa/tests/test_targeted_feedback.py index 0ec13b4962a2..399164791bd6 100644 --- a/xmodule/capa/tests/test_targeted_feedback.py +++ b/xmodule/capa/tests/test_targeted_feedback.py @@ -3,23 +3,24 @@ i.e. those with the element """ - import textwrap import unittest -from xmodule.capa.tests.helpers import load_fixture, new_loncapa_problem, mock_capa_system + +from xmodule.capa.tests.helpers import load_fixture, mock_capa_system, new_loncapa_problem class CapaTargetedFeedbackTest(unittest.TestCase): - ''' + """ Testing class - ''' + """ def setUp(self): super(CapaTargetedFeedbackTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.system = mock_capa_system() def test_no_targeted_feedback(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -71,7 +72,8 @@ def test_no_targeted_feedback(self):
                    - """) + """ + ) problem = new_loncapa_problem(xml_str) @@ -82,56 +84,65 @@ def test_no_targeted_feedback(self): self.assertRegex(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC") def test_targeted_feedback_not_finished(self): - problem = new_loncapa_problem(load_fixture('targeted_feedback.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback.xml")) the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") self.assertRegex(without_new_lines, r"
<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>
                    ") self.assertNotRegex(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC") - assert the_html == problem.get_html(), 'Should be able to call get_html() twice' + assert the_html == problem.get_html(), "Should be able to call get_html() twice" def test_targeted_feedback_student_answer1(self): - problem = new_loncapa_problem(load_fixture('targeted_feedback.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback.xml")) problem.done = True - problem.student_answers = {'1_2_1': 'choice_3'} + problem.student_answers = {"1_2_1": "choice_3"} the_html = problem.get_html() without_new_lines = the_html.replace("\\n", "").replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, r"\s*Incorrect.*3rd WRONG solution") + self.assertRegex( + without_new_lines, + r"\s*Incorrect.*3rd WRONG solution", + ) self.assertNotRegex(without_new_lines, r"feedback1|feedback2|feedbackC") # Check that calling it multiple times yields the same thing the_html2 = problem.get_html() assert the_html == the_html2 def test_targeted_feedback_student_answer2(self): - problem = new_loncapa_problem(load_fixture('targeted_feedback.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback.xml")) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} + problem.student_answers = {"1_2_1": "choice_0"} the_html = problem.get_html() without_new_lines = the_html.replace("\\n", "").replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, r"\s*Incorrect.*1st WRONG solution") + self.assertRegex( + without_new_lines, + r"\s*Incorrect.*1st WRONG solution", + ) self.assertRegex(without_new_lines, r"
<div>\{.*'1_solution_1'.*\}</div>
                    ") self.assertNotRegex(without_new_lines, r"feedback2|feedback3|feedbackC") def test_targeted_feedback_correct_answer(self): - """ Test the case of targeted feedback for a correct answer. """ - problem = new_loncapa_problem(load_fixture('targeted_feedback.xml')) + """Test the case of targeted feedback for a correct answer.""" + problem = new_loncapa_problem(load_fixture("targeted_feedback.xml")) problem.done = True - problem.student_answers = {'1_2_1': 'choice_2'} + problem.student_answers = {"1_2_1": "choice_2"} the_html = problem.get_html() without_new_lines = the_html.replace("\\n", "").replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, - r"\s*Correct.*Feedback on your correct solution...") + self.assertRegex( + without_new_lines, + r"\s*Correct.*Feedback on your correct solution...", + ) self.assertNotRegex(without_new_lines, r"feedback1|feedback2|feedback3") def test_targeted_feedback_id_typos(self): """Cases where the explanation-id's don't match anything.""" - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -182,24 +193,26 @@ def test_targeted_feedback_id_typos(self):
                    - """) + """ + ) # explanation-id does not match anything: fall back to empty targetedfeedbackset problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} + problem.student_answers = {"1_2_1": "choice_0"} the_html = problem.get_html() self.assertRegex(the_html, r"\s*") # New problem with same XML -- try the correct choice. problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_2'} # correct + problem.student_answers = {"1_2_1": "choice_2"} # correct the_html = problem.get_html() self.assertRegex(the_html, r"\s*") def test_targeted_feedback_no_solution_element(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -219,22 +232,21 @@ def test_targeted_feedback_no_solution_element(self):
                    - """) + """ + ) # Solution element not found problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_2'} + problem.student_answers = {"1_2_1": "choice_2"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") #
                    right after - self.assertRegex( - without_new_lines, - r"
                    .*.*\s*
                    " - ) + self.assertRegex(without_new_lines, r"
                    .*.*\s*
                    ") def test_targeted_feedback_show_solution_explanation(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -286,16 +298,20 @@ def test_targeted_feedback_show_solution_explanation(self):
                    - """) + """ + ) problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} + problem.student_answers = {"1_2_1": "choice_0"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, r".*1st WRONG solution") + self.assertRegex( + without_new_lines, + r".*1st WRONG solution", + ) self.assertRegex(without_new_lines, r"\{.*'1_solution_1'.*\}
                    ") self.assertNotRegex(without_new_lines, r"feedback2|feedback3") @@ -304,7 +320,8 @@ def test_targeted_feedback_show_solution_explanation(self): assert the_html == the_html2 def test_targeted_feedback_no_show_solution_explanation(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -356,22 +373,27 @@ def test_targeted_feedback_no_show_solution_explanation(self):
                    - """) + """ + ) problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} + problem.student_answers = {"1_2_1": "choice_0"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, r".*1st WRONG solution") + self.assertRegex( + without_new_lines, + r".*1st WRONG solution", + ) self.assertNotRegex(without_new_lines, r"\{.*'1_solution_1'.*\}
                    ") self.assertNotRegex(without_new_lines, r"feedback2|feedback3|feedbackC") def test_targeted_feedback_with_solutionset_explanation(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -433,22 +455,29 @@ def test_targeted_feedback_with_solutionset_explanation(self):
                    - """) + """ + ) problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} + problem.student_answers = {"1_2_1": "choice_0"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # pylint: disable=line-too-long - self.assertRegex(without_new_lines, r".*1st WRONG solution") - self.assertRegex(without_new_lines, r".*1st WRONG solution", + ) + self.assertRegex( + without_new_lines, r"\{.*'1_solution_1'.*\}
                    ") self.assertNotRegex(without_new_lines, r"feedback2|feedback3") def test_targeted_feedback_no_feedback_for_selected_choice1(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -495,13 +524,14 @@ def test_targeted_feedback_no_feedback_for_selected_choice1(self):
                    - """) + """ + ) # The student choses one with no feedback, but alwaysShowCorrectChoiceExplanation # is in force, so we should see the correct solution feedback. problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_1'} + problem.student_answers = {"1_2_1": "choice_1"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") @@ -511,7 +541,8 @@ def test_targeted_feedback_no_feedback_for_selected_choice1(self): self.assertNotRegex(without_new_lines, r"feedback1|feedback3") def test_targeted_feedback_no_feedback_for_selected_choice2(self): - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """

                    What is the correct answer?

                    @@ -558,12 +589,13 @@ def test_targeted_feedback_no_feedback_for_selected_choice2(self):
                    - """) + """ + ) # The student chooses one with no feedback set, so we check that there's no feedback. problem = new_loncapa_problem(xml_str) problem.done = True - problem.student_answers = {'1_2_1': 'choice_1'} + problem.student_answers = {"1_2_1": "choice_1"} the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") @@ -574,37 +606,37 @@ def test_targeted_feedback_no_feedback_for_selected_choice2(self): def test_targeted_feedback_multiple_not_answered(self): # Not answered -> empty targeted feedback - problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback_multiple.xml")) the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # Q1 and Q2 have no feedback self.assertRegex( without_new_lines, - r'\s*.*\s*' + r"\s*.*\s*", ) def test_targeted_feedback_multiple_answer_1(self): - problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback_multiple.xml")) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0'} # feedback1 + problem.student_answers = {"1_2_1": "choice_0"} # feedback1 the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # Q1 has feedback1 and Q2 has nothing self.assertRegex( without_new_lines, - r'.*?explanation-id="feedback1".*?.*' + - r'\s*' + r'.*?explanation-id="feedback1".*?.*' + + r"\s*", ) def test_targeted_feedback_multiple_answer_2(self): - problem = new_loncapa_problem(load_fixture('targeted_feedback_multiple.xml')) + problem = new_loncapa_problem(load_fixture("targeted_feedback_multiple.xml")) problem.done = True - problem.student_answers = {'1_2_1': 'choice_0', '1_3_1': 'choice_2'} # Q1 wrong, Q2 correct + problem.student_answers = {"1_2_1": "choice_0", "1_3_1": "choice_2"} # Q1 wrong, Q2 correct the_html = problem.get_html() without_new_lines = the_html.replace("\n", "") # Q1 has feedback1 and Q2 has feedbackC self.assertRegex( without_new_lines, - r'.*?explanation-id="feedback1".*?.*' + - r'.*explanation-id="feedbackC".*?' 
+ r'.*?explanation-id="feedback1".*?.*' + + r'.*explanation-id="feedbackC".*?', ) diff --git a/xmodule/capa/tests/test_util.py b/xmodule/capa/tests/test_util.py index 3176ff9b9a3d..da62d2d01d98 100644 --- a/xmodule/capa/tests/test_util.py +++ b/xmodule/capa/tests/test_util.py @@ -17,7 +17,7 @@ contextualize_text, get_inner_html_from_xpath, remove_markup, - sanitize_html + sanitize_html, ) @@ -38,24 +38,24 @@ def test_compare_with_tolerance(self): # lint-amnesty, pylint: disable=too-many result = compare_with_tolerance(101.0, 100.0) assert not result # Test absolute percentage tolerance - result = compare_with_tolerance(109.9, 100.0, '10%', False) + result = compare_with_tolerance(109.9, 100.0, "10%", False) assert result - result = compare_with_tolerance(110.1, 100.0, '10%', False) + result = compare_with_tolerance(110.1, 100.0, "10%", False) assert not result # Test relative percentage tolerance - result = compare_with_tolerance(111.0, 100.0, '10%', True) + result = compare_with_tolerance(111.0, 100.0, "10%", True) assert result - result = compare_with_tolerance(112.0, 100.0, '10%', True) + result = compare_with_tolerance(112.0, 100.0, "10%", True) assert not result # Test absolute tolerance (string) - result = compare_with_tolerance(109.9, 100.0, '10.0', False) + result = compare_with_tolerance(109.9, 100.0, "10.0", False) assert result - result = compare_with_tolerance(110.1, 100.0, '10.0', False) + result = compare_with_tolerance(110.1, 100.0, "10.0", False) assert not result # Test relative tolerance (string) - result = compare_with_tolerance(111.0, 100.0, '0.1', True) + result = compare_with_tolerance(111.0, 100.0, "0.1", True) assert result - result = compare_with_tolerance(112.0, 100.0, '0.1', True) + result = compare_with_tolerance(112.0, 100.0, "0.1", True) assert not result # Test absolute tolerance (float) result = compare_with_tolerance(109.9, 100.0, 10.0, False) @@ -68,7 +68,7 @@ def test_compare_with_tolerance(self): # lint-amnesty, pylint: disable=too-many result = compare_with_tolerance(112.0, 100.0, 0.1, True) assert not result ##### Infinite values ##### - infinity = float('Inf') + infinity = float("Inf") # Test relative tolerance (float) result = compare_with_tolerance(infinity, 100.0, 1.0, True) assert not result @@ -84,25 +84,25 @@ def test_compare_with_tolerance(self): # lint-amnesty, pylint: disable=too-many result = compare_with_tolerance(infinity, infinity, 1.0, False) assert result # Test relative tolerance (string) - result = compare_with_tolerance(infinity, 100.0, '1.0', True) + result = compare_with_tolerance(infinity, 100.0, "1.0", True) assert not result - result = compare_with_tolerance(100.0, infinity, '1.0', True) + result = compare_with_tolerance(100.0, infinity, "1.0", True) assert not result - result = compare_with_tolerance(infinity, infinity, '1.0', True) + result = compare_with_tolerance(infinity, infinity, "1.0", True) assert result # Test absolute tolerance (string) - result = compare_with_tolerance(infinity, 100.0, '1.0', False) + result = compare_with_tolerance(infinity, 100.0, "1.0", False) assert not result - result = compare_with_tolerance(100.0, infinity, '1.0', False) + result = compare_with_tolerance(100.0, infinity, "1.0", False) assert not result - result = compare_with_tolerance(infinity, infinity, '1.0', False) + result = compare_with_tolerance(infinity, infinity, "1.0", False) assert result # Test absolute tolerance for smaller values result = compare_with_tolerance(100.01, 100.0, 0.01, False) assert result result = 
compare_with_tolerance(100.001, 100.0, 0.001, False) assert result - result = compare_with_tolerance(100.01, 100.0, '0.01%', False) + result = compare_with_tolerance(100.01, 100.0, "0.01%", False) assert result result = compare_with_tolerance(100.002, 100.0, 0.001, False) assert not result @@ -116,21 +116,21 @@ def test_compare_with_tolerance(self): # lint-amnesty, pylint: disable=too-many assert not result result = compare_with_tolerance(100.01, complex(100.0, 0), 0.010, False) assert result - result = compare_with_tolerance(110.1, complex(100.0, 0), '10.0', False) + result = compare_with_tolerance(110.1, complex(100.0, 0), "10.0", False) assert not result - result = compare_with_tolerance(111.0, complex(100.0, 0), '10%', True) + result = compare_with_tolerance(111.0, complex(100.0, 0), "10%", True) assert result def test_sanitize_html(self): """ Test for html sanitization with nh3. """ - allowed_tags = ['div', 'p', 'audio', 'pre', 'span'] + allowed_tags = ["div", "p", "audio", "pre", "span"] for tag in allowed_tags: queue_msg = "<{0}>Test message</{0}>".format(tag) assert sanitize_html(queue_msg) == queue_msg - not_allowed_tag = 'script' + not_allowed_tag = "script" queue_msg = "<{0}>Test message</{0}>".format(not_allowed_tag) expected = "" assert sanitize_html(queue_msg) == expected @@ -146,27 +146,26 @@ def test_remove_markup(self): """ Test for markup removal with nh3. """ - assert remove_markup('The <mark>Truth</mark> is <em>Out There</em> & you need to find it') ==\ - 'The Truth is Out There & you need to find it' + assert ( + remove_markup("The <mark>Truth</mark> is <em>Out There</em> & you need to find it") + == "The Truth is Out There & you need to find it" + ) - @ddt.data( - 'When the root level failš the whole hierarchy won’t work anymore.', - 'あなたあなたあなた' - ) + @ddt.data("When the root level failš the whole hierarchy won’t work anymore.", "あなたあなたあなた") def test_contextualize_text(self, context_value): """Verify that variable substitution works as intended with non-ascii characters.""" - key = 'answer0' - text = '$answer0' + key = "answer0" + text = "$answer0" context = {key: context_value} contextual_text = contextualize_text(text, context) assert context_value == contextual_text def test_contextualize_text_with_non_ascii_context(self): """Verify that variable substitution works as intended with non-ascii characters.""" - key = 'あなた$a $b' - text = '$' + key - context = {'a': 'あなたあなたあなた', 'b': 'あなたhi'} - expected_text = '$あなたあなたあなたあなた あなたhi' + key = "あなた$a $b" + text = "$" + key + context = {"a": "あなたあなたあなた", "b": "あなたhi"} + expected_text = "$あなたあなたあなたあなた あなたhi" contextual_text = contextualize_text(text, context) assert expected_text == contextual_text diff --git a/xmodule/capa/tests/test_xqueue_interface.py b/xmodule/capa/tests/test_xqueue_interface.py index db06fbfcb367..91f7e838578e 100644 --- a/xmodule/capa/tests/test_xqueue_interface.py +++ b/xmodule/capa/tests/test_xqueue_interface.py @@ -1,14 +1,14 @@ """Test the XQueue service and interface.""" +import json from unittest import TestCase from unittest.mock import Mock, patch +import pytest from django.conf import settings from django.test.utils import override_settings from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from xblock.fields import ScopeIds -import pytest -import json from openedx.core.djangolib.testing.utils import skip_unless_lms from xmodule.capa.xqueue_interface import XQueueInterface, XQueueService @@ -59,14 +59,14 @@ def test_construct_callback_with_flag_enabled(self, mock_flag): def test_construct_callback_with_flag_disabled(self,
mock_flag): """Test construct_callback when the waffle flag is disabled.""" usage_id = self.block.scope_ids.usage_id - callback_url = f'courses/{usage_id.context_key}/xqueue/user1/{usage_id}' + callback_url = f"courses/{usage_id.context_key}/xqueue/user1/{usage_id}" - assert self.service.construct_callback() == f'{settings.LMS_ROOT_URL}/{callback_url}/score_update' - assert self.service.construct_callback('alt_dispatch') == f'{settings.LMS_ROOT_URL}/{callback_url}/alt_dispatch' + assert self.service.construct_callback() == f"{settings.LMS_ROOT_URL}/{callback_url}/score_update" + assert self.service.construct_callback("alt_dispatch") == f"{settings.LMS_ROOT_URL}/{callback_url}/alt_dispatch" - custom_callback_url = 'http://alt.url' - with override_settings(XQUEUE_INTERFACE={**settings.XQUEUE_INTERFACE, 'callback_url': custom_callback_url}): - assert self.service.construct_callback() == f'{custom_callback_url}/{callback_url}/score_update' + custom_callback_url = "http://alt.url" + with override_settings(XQUEUE_INTERFACE={**settings.XQUEUE_INTERFACE, "callback_url": custom_callback_url}): + assert self.service.construct_callback() == f"{custom_callback_url}/{callback_url}/score_update" def test_default_queuename(self): """Check the format of the default queue name.""" @@ -90,16 +90,20 @@ def test_send_to_queue_with_flag_enabled(mock_send_to_submission, mock_flag): block = Mock() # Mock block for the constructor xqueue_interface = XQueueInterface(url, django_auth, block=block) - header = json.dumps({ - "lms_callback_url": ( - "http://example.com/courses/course-v1:test_org+test_course+test_run/" - "xqueue/block@item_id/type@problem" - ), - }) - body = json.dumps({ - "student_info": json.dumps({"anonymous_student_id": "student_id"}), - "student_response": "student_answer", - }) + header = json.dumps( + { + "lms_callback_url": ( + "http://example.com/courses/course-v1:test_org+test_course+test_run/" + "xqueue/block@item_id/type@problem" + ), + } + ) + body = json.dumps( + { + "student_info": json.dumps({"anonymous_student_id": "student_id"}), + "student_response": "student_answer", + } + ) files_to_upload = None mock_send_to_submission.return_value = {"submission": "mock_submission"} @@ -118,16 +122,20 @@ def test_send_to_queue_with_flag_disabled(mock_http_post, mock_flag): block = Mock() # Mock block for the constructor xqueue_interface = XQueueInterface(url, django_auth, block=block) - header = json.dumps({ - "lms_callback_url": ( - "http://example.com/courses/course-v1:test_org+test_course+test_run/" - "xqueue/block@item_id/type@problem" - ), - }) - body = json.dumps({ - "student_info": json.dumps({"anonymous_student_id": "student_id"}), - "student_response": "student_answer", - }) + header = json.dumps( + { + "lms_callback_url": ( + "http://example.com/courses/course-v1:test_org+test_course+test_run/" + "xqueue/block@item_id/type@problem" + ), + } + ) + body = json.dumps( + { + "student_info": json.dumps({"anonymous_student_id": "student_id"}), + "student_response": "student_answer", + } + ) files_to_upload = None mock_http_post.return_value = (0, "Submission sent successfully") diff --git a/xmodule/capa/tests/test_xqueue_submission.py b/xmodule/capa/tests/test_xqueue_submission.py index 704c6249d447..806a103f6ac2 100644 --- a/xmodule/capa/tests/test_xqueue_submission.py +++ b/xmodule/capa/tests/test_xqueue_submission.py @@ -1,25 +1,24 @@ """ Unit tests for the XQueueInterfaceSubmission class. 
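For orientation, every test below hand-builds the two JSON strings that xqueue passes around. A rough sketch of their shape (illustrative values only, mirroring the fixtures in this file):

    header = json.dumps({"lms_callback_url": "...", "queue_name": "default"})
    body = json.dumps({
        "student_info": json.dumps({"anonymous_student_id": "..."}),
        "student_response": "...",
        "grader_payload": json.dumps({"grader": "..."}),
    })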
""" + import json -import pytest from unittest.mock import Mock, patch -from xmodule.capa.xqueue_submission import XQueueInterfaceSubmission + +import pytest from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from xblock.fields import ScopeIds +from xmodule.capa.xqueue_submission import XQueueInterfaceSubmission + @pytest.fixture def xqueue_service(): """ Fixture that returns an instance of XQueueInterfaceSubmission. """ - location = BlockUsageLocator( - CourseLocator("test_org", "test_course", "test_run"), - "problem", - "ExampleProblem" - ) - block = Mock(scope_ids=ScopeIds('user1', 'problem', location, location)) + location = BlockUsageLocator(CourseLocator("test_org", "test_course", "test_run"), "problem", "ExampleProblem") + block = Mock(scope_ids=ScopeIds("user1", "problem", location, location)) block.max_score = Mock(return_value=10) return XQueueInterfaceSubmission(block) @@ -28,49 +27,52 @@ def test_get_submission_params(xqueue_service): """ Test extracting item data from an xqueue submission. """ - header = json.dumps({ - 'lms_callback_url': 'http://example.com/callback', - 'queue_name': 'default' - }) - payload = json.dumps({ - 'student_info': json.dumps({'anonymous_student_id': 'student_id'}), - 'student_response': 'student_answer', - 'grader_payload': json.dumps({'grader': 'test.py'}) - }) - - student_item, student_answer, queue_name, grader_file_name, points_possible = ( - xqueue_service.get_submission_params(header, payload) + header = json.dumps({"lms_callback_url": "http://example.com/callback", "queue_name": "default"}) + payload = json.dumps( + { + "student_info": json.dumps({"anonymous_student_id": "student_id"}), + "student_response": "student_answer", + "grader_payload": json.dumps({"grader": "test.py"}), + } + ) + + student_item, student_answer, queue_name, grader_file_name, points_possible = xqueue_service.get_submission_params( + header, payload ) assert student_item == { - 'item_id': 'block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem', - 'item_type': 'problem', - 'course_id': 'course-v1:test_org+test_course+test_run', - 'student_id': 'student_id' + "item_id": "block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem", + "item_type": "problem", + "course_id": "course-v1:test_org+test_course+test_run", + "student_id": "student_id", } - assert student_answer == 'student_answer' - assert queue_name == 'default' - assert grader_file_name == 'test.py' + assert student_answer == "student_answer" + assert queue_name == "default" + assert grader_file_name == "test.py" assert points_possible == 10 @pytest.mark.django_db -@patch('submissions.api.create_external_grader_detail') +@patch("submissions.api.create_external_grader_detail") def test_send_to_submission(mock_create_external_grader_detail, xqueue_service): """ Test sending a submission to the grading system. 
""" - header = json.dumps({ - 'lms_callback_url': ( - 'http://example.com/courses/course-v1:test_org+test_course+test_run/xqueue/5/' - 'block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem/' - ), - }) - body = json.dumps({ - 'student_info': json.dumps({'anonymous_student_id': 'student_id'}), - 'student_response': 'student_answer', - 'grader_payload': json.dumps({'grader': 'test.py'}) - }) + header = json.dumps( + { + "lms_callback_url": ( + "http://example.com/courses/course-v1:test_org+test_course+test_run/xqueue/5/" + "block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem/" + ), + } + ) + body = json.dumps( + { + "student_info": json.dumps({"anonymous_student_id": "student_id"}), + "student_response": "student_answer", + "grader_payload": json.dumps({"grader": "test.py"}), + } + ) mock_response = {"submission": "mock_submission"} mock_create_external_grader_detail.return_value = mock_response @@ -80,35 +82,38 @@ def test_send_to_submission(mock_create_external_grader_detail, xqueue_service): assert result == mock_response mock_create_external_grader_detail.assert_called_once_with( { - 'item_id': 'block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem', - 'item_type': 'problem', - 'course_id': 'course-v1:test_org+test_course+test_run', - 'student_id': 'student_id' + "item_id": "block-v1:test_org+test_course+test_run+type@problem+block@ExampleProblem", + "item_type": "problem", + "course_id": "course-v1:test_org+test_course+test_run", + "student_id": "student_id", }, - 'student_answer', - queue_name='default', - grader_file_name='test.py', + "student_answer", + queue_name="default", + grader_file_name="test.py", points_possible=10, - files=None + files=None, ) @pytest.mark.django_db -@patch('submissions.api.create_external_grader_detail') +@patch("submissions.api.create_external_grader_detail") def test_send_to_submission_with_missing_fields(mock_create_external_grader_detail, xqueue_service): """ Test send_to_submission with missing required fields. """ - header = json.dumps({ - 'lms_callback_url': ( - 'http://example.com/courses/course-v1:test_org+test_course+test_run/xqueue/5/' - 'block@item_id/' - ) - }) - body = json.dumps({ - 'student_info': json.dumps({'anonymous_student_id': 'student_id'}), - 'grader_payload': json.dumps({'grader': 'test.py'}) - }) + header = json.dumps( + { + "lms_callback_url": ( + "http://example.com/courses/course-v1:test_org+test_course+test_run/xqueue/5/" "block@item_id/" + ) + } + ) + body = json.dumps( + { + "student_info": json.dumps({"anonymous_student_id": "student_id"}), + "grader_payload": json.dumps({"grader": "test.py"}), + } + ) result = xqueue_service.send_to_submission(header, body) diff --git a/xmodule/capa/util.py b/xmodule/capa/util.py index ff8a22f07245..f33d2e5a6ddf 100644 --- a/xmodule/capa/util.py +++ b/xmodule/capa/util.py @@ -2,7 +2,6 @@ Utility functions for capa. 
""" - import logging import re from cmath import isinf, isnan @@ -14,10 +13,10 @@ from openedx.core.djangolib.markup import HTML -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # # Utility functions used in CAPA responsetypes -default_tolerance = '0.001%' +default_tolerance = "0.001%" log = logging.getLogger(__name__) @@ -59,7 +58,7 @@ def compare_with_tolerance(student_complex, instructor_complex, tolerance=defaul if isinstance(tolerance, str): if tolerance == default_tolerance: relative_tolerance = True - if tolerance.endswith('%'): + if tolerance.endswith("%"): tolerance = evaluator({}, {}, tolerance[:-1]) * 0.01 if not relative_tolerance: tolerance = tolerance * abs(instructor_complex) @@ -102,12 +101,13 @@ def contextualize_text(text, context): # private Takes a string with variables. E.g. $a+$b. Does a substitution of those variables from the context """ + def convert_to_str(value): """The method tries to convert unicode/non-ascii values into string""" try: return str(value) except UnicodeEncodeError: - return value.encode('utf8', errors='ignore') + return value.encode("utf8", errors="ignore") if not text: return text @@ -118,8 +118,8 @@ def convert_to_str(value): # program, but also e.g. a reference to the numpy module. # Should be a separate dict of variables that should be # replaced. - context_key = '$' + key - if context_key in (text.decode('utf-8') if isinstance(text, bytes) else text): + context_key = "$" + key + if context_key in (text.decode("utf-8") if isinstance(text, bytes) else text): text = convert_to_str(text) context_value = convert_to_str(context[key]) text = text.replace(context_key, context_value) @@ -151,7 +151,7 @@ def is_file(file_to_test): """ Duck typing to check if 'file_to_test' is a File object """ - return all(hasattr(file_to_test, method) for method in ['read', 'name']) + return all(hasattr(file_to_test, method) for method in ["read", "name"]) def find_with_default(node, path, default): @@ -182,15 +182,15 @@ def sanitize_html(html_code): Used to sanitize XQueue responses from Matlab. 
""" attributes = nh3.ALLOWED_ATTRIBUTES.copy() - attributes.update({ - '*': {'class', 'style', 'id'}, - 'audio': {'controls', 'autobuffer', 'autoplay', 'src'}, - 'img': {'src', 'width', 'height', 'class'} - }) + attributes.update( + { + "*": {"class", "style", "id"}, + "audio": {"controls", "autobuffer", "autoplay", "src"}, + "img": {"src", "width", "height", "class"}, + } + ) output = nh3.clean( - html_code, - tags=nh3.ALLOWED_TAGS | {'div', 'p', 'audio', 'pre', 'img', 'span'}, - attributes=attributes + html_code, tags=nh3.ALLOWED_TAGS | {"div", "p", "audio", "pre", "img", "span"}, attributes=attributes ) return output @@ -201,10 +201,10 @@ def get_inner_html_from_xpath(xpath_node): """ # returns string from xpath node - html = etree.tostring(xpath_node).strip().decode('utf-8') + html = etree.tostring(xpath_node).strip().decode("utf-8") # strips outer tag from html string # xss-lint: disable=python-interpolate-html - inner_html = re.sub('(?ms)<%s[^>]*>(.*)</%s>' % (xpath_node.tag, xpath_node.tag), '\\1', html) + inner_html = re.sub("(?ms)<%s[^>]*>(.*)</%s>" % (xpath_node.tag, xpath_node.tag), "\\1", html) return inner_html.strip() diff --git a/xmodule/capa/xqueue_interface.py b/xmodule/capa/xqueue_interface.py index 66b2dc738472..85d3b0e5dd8e 100644 --- a/xmodule/capa/xqueue_interface.py +++ b/xmodule/capa/xqueue_interface.py @@ -1,27 +1,28 @@ """ LMS Interface to external queueing system (xqueue) """ -from typing import Dict, Optional, TYPE_CHECKING import hashlib import json import logging +from typing import TYPE_CHECKING, Dict, Optional import requests from django.conf import settings from django.urls import reverse +from opaque_keys.edx.keys import CourseKey from requests.auth import HTTPBasicAuth + from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag -from opaque_keys.edx.keys import CourseKey from xmodule.capa.xqueue_submission import XQueueInterfaceSubmission if TYPE_CHECKING: from xmodule.capa_block import ProblemBlock log = logging.getLogger(__name__) -dateformat = '%Y%m%d%H%M%S' +dateformat = "%Y%m%d%H%M%S" -XQUEUE_METRIC_NAME = 'edxapp.xqueue' +XQUEUE_METRIC_NAME = "edxapp.xqueue" # Wait time for response from Xqueue. XQUEUE_TIMEOUT = 35 # seconds @@ -38,7 +39,7 @@ # .. toggle_will_remain_in_codebase: True # .. toggle_tickets: none # .. toggle_status: supported -SEND_TO_SUBMISSION_COURSE_FLAG = CourseWaffleFlag('send_to_submission_course.enable', __name__) +SEND_TO_SUBMISSION_COURSE_FLAG = CourseWaffleFlag("send_to_submission_course.enable", __name__) def use_edx_submissions_for_xqueue(course_key: CourseKey | None = None) -> bool: @@ -62,7 +63,7 @@ def make_hashkey(seed): Generate a string key by hashing """ h = hashlib.md5() - h.update(str(seed).encode('latin-1')) + h.update(str(seed).encode("latin-1")) return h.hexdigest() @@ -76,11 +77,7 @@ def make_xheader(lms_callback_url, lms_key, queue_name): 'queue_name': designate a specific queue within xqueue server, e.g. 
'MITx-6.00x' (string) } """ - return json.dumps({ - 'lms_callback_url': lms_callback_url, - 'lms_key': lms_key, - 'queue_name': queue_name - }) + return json.dumps({"lms_callback_url": lms_callback_url, "lms_key": lms_key, "queue_name": queue_name}) def parse_xreply(xreply): @@ -94,10 +91,10 @@ def parse_xreply(xreply): xreply = json.loads(xreply) except ValueError as err: log.error(err) - return (1, 'unexpected reply from server') + return (1, "unexpected reply from server") - return_code = xreply['return_code'] - content = xreply['content'] + return_code = xreply["return_code"] + content = xreply["content"] return (return_code, content) @@ -105,9 +102,13 @@ def parse_xreply(xreply): class XQueueInterface: """Initializes the XQueue interface.""" - def __init__(self, url: str, django_auth: Dict[str, str], - requests_auth: Optional[HTTPBasicAuth] = None, - block: 'ProblemBlock' = None): + def __init__( + self, + url: str, + django_auth: Dict[str, str], + requests_auth: Optional[HTTPBasicAuth] = None, + block: "ProblemBlock" = None, + ): """ Initializes the XQueue interface. @@ -142,13 +143,13 @@ def send_to_queue(self, header, body, files_to_upload=None): # log the send to xqueue header_info = json.loads(header) - queue_name = header_info.get('queue_name', '') # lint-amnesty, pylint: disable=unused-variable + queue_name = header_info.get("queue_name", "") # lint-amnesty, pylint: disable=unused-variable # Attempt to send to queue (error, msg) = self._send_to_queue(header, body, files_to_upload) # Log in, then try again - if error and (msg == 'login_required'): + if error and (msg == "login_required"): (error, content) = self._login() if error != 0: # when the login fails @@ -163,17 +164,11 @@ def send_to_queue(self, header, body, files_to_upload=None): return error, msg def _login(self): # lint-amnesty, pylint: disable=missing-function-docstring - payload = { - 'username': self.auth['username'], - 'password': self.auth['password'] - } - return self._http_post(self.url + '/xqueue/login/', payload) + payload = {"username": self.auth["username"], "password": self.auth["password"]} + return self._http_post(self.url + "/xqueue/login/", payload) def _send_to_queue(self, header, body, files_to_upload): # lint-amnesty, pylint: disable=missing-function-docstring - payload = { - 'xqueue_header': header, - 'xqueue_body': body - } + payload = {"xqueue_header": header, "xqueue_body": body} files = {} if files_to_upload is not None: for f in files_to_upload: @@ -185,31 +180,29 @@ def _send_to_queue(self, header, body, files_to_upload): # lint-amnesty, pylint "Unexpected None block: falling back to legacy xqueue submission. " "This may indicate a problem with the xqueue transition." 
) - return self._http_post(self.url + '/xqueue/submit/', payload, files=files) + return self._http_post(self.url + "/xqueue/submit/", payload, files=files) course_key = self.block.scope_ids.usage_id.context_key if use_edx_submissions_for_xqueue(course_key): submission = self.submission.send_to_submission(header, body, files) - return None, '' + return None, "" - return self._http_post(self.url + '/xqueue/submit/', payload, files=files) + return self._http_post(self.url + "/xqueue/submit/", payload, files=files) def _http_post(self, url, data, files=None): # lint-amnesty, pylint: disable=missing-function-docstring try: - response = self.session.post( - url, data=data, files=files, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT) - ) + response = self.session.post(url, data=data, files=files, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT)) except requests.exceptions.ConnectionError as err: log.error(err) - return 1, 'cannot connect to server' + return 1, "cannot connect to server" except requests.exceptions.ReadTimeout as err: log.error(err) - return 1, 'failed to read from the server' + return 1, "failed to read from the server" if response.status_code not in [200]: - return 1, 'unexpected HTTP status code [%d]' % response.status_code + return 1, "unexpected HTTP status code [%d]" % response.status_code return parse_xreply(response.text) @@ -222,12 +215,11 @@ class XQueueService: block: The `ProblemBlock` instance. """ - def __init__(self, block: 'ProblemBlock'): - basic_auth = settings.XQUEUE_INTERFACE.get('basic_auth') + def __init__(self, block: "ProblemBlock"): + basic_auth = settings.XQUEUE_INTERFACE.get("basic_auth") requests_auth = HTTPBasicAuth(*basic_auth) if basic_auth else None self._interface = XQueueInterface( - settings.XQUEUE_INTERFACE['url'], settings.XQUEUE_INTERFACE['django_auth'], requests_auth, - block=block + settings.XQUEUE_INTERFACE["url"], settings.XQUEUE_INTERFACE["django_auth"], requests_auth, block=block ) self._block = block @@ -268,7 +260,7 @@ def default_queuename(self) -> str: Returns the default queue name for the current course. 
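For example, a course in org "MITx" with course code "6.00x" gets the queue name "MITx-6.00x"; any spaces in either part are replaced with underscores.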
""" course_id = self._block.scope_ids.usage_id.context_key - return f'{course_id.org}-{course_id.course}'.replace(' ', '_') + return f"{course_id.org}-{course_id.course}".replace(" ", "_") @property def waittime(self) -> int: diff --git a/xmodule/capa/xqueue_submission.py b/xmodule/capa/xqueue_submission.py index 3d7f061ffc5d..b45e1b8ce87e 100644 --- a/xmodule/capa/xqueue_submission.py +++ b/xmodule/capa/xqueue_submission.py @@ -5,13 +5,14 @@ import json import logging + from xmodule.capa.errors import ( GetSubmissionParamsError, JSONParsingError, MissingKeyError, - ValidationError, + RuntimeErrorSubmission, TypeErrorSubmission, - RuntimeErrorSubmission + ValidationError, ) log = logging.getLogger(__name__) @@ -49,7 +50,7 @@ def get_submission_params(self, header, payload): header = self._parse_json(header, "header") payload = self._parse_json(payload, "payload") - queue_name = header.get('queue_name', 'default') + queue_name = header.get("queue_name", "default") if not self.block: raise GetSubmissionParamsError() @@ -62,7 +63,7 @@ def get_submission_params(self, header, payload): try: grader_payload = self._parse_json(payload["grader_payload"], "grader_payload") - grader_file_name = grader_payload.get("grader", '') + grader_file_name = grader_payload.get("grader", "") except KeyError as e: raise MissingKeyError("grader_payload") from e @@ -76,12 +77,7 @@ def get_submission_params(self, header, payload): if student_answer is None: raise ValidationError("The field 'student_response' does not exist.") - student_dict = { - 'item_id': item_id, - 'item_type': item_type, - 'course_id': course_id, - 'student_id': student_id - } + student_dict = {"item_id": item_id, "item_type": item_type, "course_id": course_id, "student_id": student_id} return student_dict, student_answer, queue_name, grader_file_name, points_possible @@ -91,8 +87,9 @@ def send_to_submission(self, header, body, files_to_upload=None): """ try: from submissions.api import create_external_grader_detail - student_item, answer, queue_name, grader_file_name, points_possible = ( - self.get_submission_params(header, body) + + student_item, answer, queue_name, grader_file_name, points_possible = self.get_submission_params( + header, body ) return create_external_grader_detail( student_item, @@ -100,7 +97,7 @@ def send_to_submission(self, header, body, files_to_upload=None): queue_name=queue_name, grader_file_name=grader_file_name, points_possible=points_possible, - files=files_to_upload + files=files_to_upload, ) except (JSONParsingError, MissingKeyError, ValidationError) as e: log.error("%s", e) diff --git a/xmodule/capa_block.py b/xmodule/capa_block.py index 1a096e76b22d..0ae400b434a7 100644 --- a/xmodule/capa_block.py +++ b/xmodule/capa_block.py @@ -1,6 +1,7 @@ """ Implements the Problem XBlock, which is built on top of the CAPA subsystem. 
""" + from __future__ import annotations import copy @@ -23,14 +24,14 @@ from pytz import utc from web_fragments.fragment import Fragment from xblock.core import XBlock -from xblock.fields import Boolean, Dict, Float, Integer, Scope, String, XMLString, List +from xblock.fields import Boolean, Dict, Float, Integer, List, Scope, String, XMLString from xblock.scorable import ScorableXBlockMixin, Score from xblocks_contrib.problem import ProblemBlock as _ExtractedProblemBlock from common.djangoapps.xblock_django.constants import ( ATTR_KEY_DEPRECATED_ANONYMOUS_USER_ID, - ATTR_KEY_USER_IS_STAFF, ATTR_KEY_USER_ID, + ATTR_KEY_USER_IS_STAFF, ) from openedx.core.djangolib.markup import HTML, Text from xmodule.capa import responsetypes @@ -43,15 +44,11 @@ from xmodule.exceptions import NotFoundError, ProcessingError from xmodule.graders import ShowCorrectness from xmodule.raw_block import RawMixin -from xmodule.util.builtin_assets import add_webpack_js_to_fragment, add_css_to_fragment +from xmodule.util.builtin_assets import add_css_to_fragment, add_webpack_js_to_fragment from xmodule.util.sandboxing import SandboxService -from xmodule.x_module import ( - ResourceTemplates, - XModuleMixin, - XModuleToXBlockMixin, - shim_xmodule_js -) +from xmodule.x_module import ResourceTemplates, XModuleMixin, XModuleToXBlockMixin, shim_xmodule_js from xmodule.xml_block import XmlMixin + from .capa.xqueue_interface import XQueueService from .fields import Date, ListScoreField, ScoreField, Timedelta from .progress import Progress @@ -69,7 +66,7 @@ try: - FEATURES = getattr(settings, 'FEATURES', {}) + FEATURES = getattr(settings, "FEATURES", {}) except ImproperlyConfigured: FEATURES = {} @@ -78,6 +75,7 @@ class SHOWANSWER: """ Constants for when to show answer """ + ALWAYS = "always" ANSWERED = "answered" ATTEMPTED = "attempted" @@ -96,6 +94,7 @@ class GRADING_METHOD: """ Constants for grading method options. """ + LAST_SCORE = "last_score" FIRST_SCORE = "first_score" HIGHEST_SCORE = "highest_score" @@ -106,6 +105,7 @@ class RANDOMIZATION: """ Constants for problem randomization """ + ALWAYS = "always" ONRESET = "onreset" NEVER = "never" @@ -116,6 +116,7 @@ class Randomization(String): """ Define a field to store how to randomize a problem. """ + def from_json(self, value): if value in ("", "true"): return RANDOMIZATION.ALWAYS @@ -126,13 +127,13 @@ def from_json(self, value): to_json = from_json -@XBlock.needs('user') -@XBlock.needs('i18n') -@XBlock.needs('mako') -@XBlock.needs('cache') -@XBlock.needs('sandbox') -@XBlock.needs('replace_urls') -@XBlock.wants('call_to_action') +@XBlock.needs("user") +@XBlock.needs("i18n") +@XBlock.needs("mako") +@XBlock.needs("cache") +@XBlock.needs("sandbox") +@XBlock.needs("replace_urls") +@XBlock.wants("call_to_action") class _BuiltInProblemBlock( ScorableXBlockMixin, RawMixin, @@ -158,7 +159,8 @@ class _BuiltInProblemBlock( Online - Computer-Assisted Personalized Approach" LMS, from which this system is inspired. 
""" - INDEX_CONTENT_TYPE = 'CAPA' + + INDEX_CONTENT_TYPE = "CAPA" is_extracted = False @@ -166,11 +168,11 @@ class _BuiltInProblemBlock( has_score = True show_in_read_only_mode = True - template_dir_name = 'problem' + template_dir_name = "problem" mako_template = "widgets/problem-edit.html" has_author_view = True - icon_class = 'problem' + icon_class = "problem" uses_xmodule_styles_setup = True @@ -180,18 +182,19 @@ class _BuiltInProblemBlock( scope=Scope.settings, # it'd be nice to have a useful default but it screws up other things; so, # use display_name_with_default for those - default=_("Blank Problem") + default=_("Blank Problem"), ) attempts = Integer( - help=_("Number of attempts taken by the student on this problem"), - default=0, - scope=Scope.user_state + help=_("Number of attempts taken by the student on this problem"), default=0, scope=Scope.user_state ) max_attempts = Integer( display_name=_("Maximum Attempts"), - help=_("Defines the number of times a student can try to answer this problem. " - "If the value is not set, infinite attempts are allowed."), - values={"min": 0}, scope=Scope.settings + help=_( + "Defines the number of times a student can try to answer this problem. " + "If the value is not set, infinite attempts are allowed." + ), + values={"min": 0}, + scope=Scope.settings, ) grading_method = String( display_name=_("Grading Method"), @@ -210,13 +213,14 @@ class _BuiltInProblemBlock( ) due = Date(help=_("Date that this problem is due by"), scope=Scope.settings) graceperiod = Timedelta( - help=_("Amount of time after the due date that submissions will be accepted"), - scope=Scope.settings + help=_("Amount of time after the due date that submissions will be accepted"), scope=Scope.settings ) show_correctness = String( display_name=_("Show Results"), - help=_("Defines when to show whether a learner's answer to the problem is correct. " - "Configured on the subsection."), + help=_( + "Defines when to show whether a learner's answer to the problem is correct. " + "Configured on the subsection." + ), scope=Scope.settings, default=ShowCorrectness.ALWAYS, values=[ @@ -227,8 +231,7 @@ class _BuiltInProblemBlock( ) showanswer = String( display_name=_("Show Answer"), - help=_("Defines when to show the answer to the problem. " - "A default value can be set in Advanced Settings."), + help=_("Defines when to show the answer to the problem. A default value can be set in Advanced Settings."), scope=Scope.settings, default=SHOWANSWER.FINISHED, values=[ @@ -244,7 +247,7 @@ class _BuiltInProblemBlock( {"display_name": _("After All Attempts"), "value": SHOWANSWER.AFTER_ALL_ATTEMPTS}, {"display_name": _("After All Attempts or Correct"), "value": SHOWANSWER.AFTER_ALL_ATTEMPTS_OR_CORRECT}, {"display_name": _("Attempted"), "value": SHOWANSWER.ATTEMPTED_NO_PAST_DUE}, - ] + ], ) attempts_before_showanswer_button = Integer( display_name=_("Show Answer: Number of Attempts"), @@ -256,22 +259,22 @@ class _BuiltInProblemBlock( scope=Scope.settings, ) force_save_button = Boolean( - help=_("Whether to force the save button to appear on the page"), - scope=Scope.settings, - default=False + help=_("Whether to force the save button to appear on the page"), scope=Scope.settings, default=False ) show_reset_button = Boolean( display_name=_("Show Reset Button"), - help=_("Determines whether a 'Reset' button is shown so the user may reset their answer. " - "A default value can be set in Advanced Settings."), + help=_( + "Determines whether a 'Reset' button is shown so the user may reset their answer. 
" + "A default value can be set in Advanced Settings." + ), scope=Scope.settings, - default=False + default=False, ) rerandomize = Randomization( display_name=_("Randomization"), help=_( - 'Defines when to randomize the variables specified in the associated Python script. ' - 'For problems that do not randomize values, specify \"Never\". ' + "Defines when to randomize the variables specified in the associated Python script. " + 'For problems that do not randomize values, specify "Never". ' ), default=RANDOMIZATION.NEVER, scope=Scope.settings, @@ -279,20 +282,19 @@ {"display_name": _("Always"), "value": RANDOMIZATION.ALWAYS}, {"display_name": _("On Reset"), "value": RANDOMIZATION.ONRESET}, {"display_name": _("Never"), "value": RANDOMIZATION.NEVER}, - {"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT} - ] + {"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT}, + ], ) data = XMLString( help=_("XML data for the problem"), scope=Scope.content, - enforce_type=FEATURES.get('ENABLE_XBLOCK_XML_VALIDATION', True), - default="<problem></problem>" + enforce_type=FEATURES.get("ENABLE_XBLOCK_XML_VALIDATION", True), + default="<problem></problem>", ) - correct_map = Dict(help=_("Dictionary with the correctness of current student answers"), - scope=Scope.user_state, default={}) - correct_map_history = List( - help=_("List of correctness maps for each attempt"), scope=Scope.user_state, default=[] + correct_map = Dict( + help=_("Dictionary with the correctness of current student answers"), scope=Scope.user_state, default={} ) + correct_map_history = List(help=_("List of correctness maps for each attempt"), scope=Scope.user_state, default=[]) input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state) student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state) student_answers_history = List( @@ -304,8 +306,9 @@ score_history = ListScoreField( help=_("List of scores for each attempt"), scope=Scope.user_state, default=[], enforce_type=False ) - has_saved_answers = Boolean(help=_("Whether or not the answers have been saved since last submit"), - scope=Scope.user_state, default=False) + has_saved_answers = Boolean( + help=_("Whether or not the answers have been saved since last submit"), scope=Scope.user_state, default=False + ) done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state, default=False) seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state) last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state) @@ -313,37 +316,37 @@ display_name=_("Timer Between Attempts"), help=_("Seconds a student must wait between submissions for a problem with multiple attempts."), scope=Scope.settings, - default=0) + default=0, + ) weight = Float( display_name=_("Problem Weight"), - help=_("Defines the number of points each problem is worth. " - "If the value is not set, each response field in the problem is worth one point."), - values={"min": 0, "step": .1}, - scope=Scope.settings + help=_( + "Defines the number of points each problem is worth. " + "If the value is not set, each response field in the problem is worth one point." + ), + values={"min": 0, "step": 0.1}, + scope=Scope.settings, ) markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings) source_code = String( - help=_("Source code for LaTeX and Word problems. 
This feature is not well-supported."), - scope=Scope.settings - ) - use_latex_compiler = Boolean( - help=_("Enable LaTeX templates?"), - default=False, - scope=Scope.settings + help=_("Source code for LaTeX and Word problems. This feature is not well-supported."), scope=Scope.settings ) + use_latex_compiler = Boolean(help=_("Enable LaTeX templates?"), default=False, scope=Scope.settings) matlab_api_key = String( display_name=_("Matlab API key"), - help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. " - "This key is granted for exclusive use by this course for the specified duration. " - "Please do not share the API key with other courses and notify MathWorks immediately " - "if you believe the key is exposed or compromised. To obtain a key for your course, " - "or to report an issue, please contact moocsupport@mathworks.com"), - scope=Scope.settings + help=_( + "Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. " + "This key is granted for exclusive use by this course for the specified duration. " + "Please do not share the API key with other courses and notify MathWorks immediately " + "if you believe the key is exposed or compromised. To obtain a key for your course, " + "or to report an issue, please contact moocsupport@mathworks.com" + ), + scope=Scope.settings, ) markdown_edited = Boolean( help=_("Indicates if the problem was edited using the Markdown editor in the Authoring MFE."), scope=Scope.settings, - default=False + default=False, ) def bind_for_student(self, *args, **kwargs): # lint-amnesty, pylint: disable=signature-differs @@ -352,8 +355,8 @@ def bind_for_student(self, *args, **kwargs): # lint-amnesty, pylint: disable=si # Capa was an XModule. When bind_for_student() was called on it with a new runtime, a new CapaModule object # was initialized when XModuleDescriptor._xmodule() was called next. self.lcp was constructed in CapaModule # init(). To keep the same behaviour, we delete self.lcp in bind_for_student(). - if 'lcp' in self.__dict__: - del self.__dict__['lcp'] + if "lcp" in self.__dict__: + del self.__dict__["lcp"] def student_view(self, _context, show_detailed_errors=False): """ @@ -368,8 +371,8 @@ def student_view(self, _context, show_detailed_errors=False): html = self.get_html() fragment = Fragment(html) add_css_to_fragment(fragment, "ProblemBlockDisplay.css") - add_webpack_js_to_fragment(fragment, 'ProblemBlockDisplay') - shim_xmodule_js(fragment, 'Problem') + add_webpack_js_to_fragment(fragment, "ProblemBlockDisplay") + shim_xmodule_js(fragment, "Problem") return fragment def public_view(self, context): @@ -377,7 +380,7 @@ def public_view(self, context): Return the view seen by users who aren't logged in or who aren't enrolled in the course. """ - if getattr(self.runtime, 'suppports_state_for_anonymous_users', False): + if getattr(self.runtime, "suppports_state_for_anonymous_users", False): # The new XBlock runtime can generally support capa problems for users who aren't logged in, so show the # normal student_view. To prevent anonymous users from viewing specific problems, adjust course policies # and/or content groups. @@ -397,11 +400,11 @@ def studio_view(self, _context): Return the studio view. 
""" fragment = Fragment( - self.runtime.service(self, 'mako').render_cms_template(self.mako_template, self.get_context()) + self.runtime.service(self, "mako").render_cms_template(self.mako_template, self.get_context()) ) - add_css_to_fragment(fragment, 'ProblemBlockEditor.css') - add_webpack_js_to_fragment(fragment, 'ProblemBlockEditor') - shim_xmodule_js(fragment, 'MarkdownEditingDescriptor') + add_css_to_fragment(fragment, "ProblemBlockEditor.css") + add_webpack_js_to_fragment(fragment, "ProblemBlockEditor") + shim_xmodule_js(fragment, "MarkdownEditingDescriptor") return fragment def handle_ajax(self, dispatch, data): @@ -418,15 +421,15 @@ def handle_ajax(self, dispatch, data): # self.score is initialized in self.lcp but in this method is accessed before self.lcp so just call it first. self.lcp # lint-amnesty, pylint: disable=pointless-statement handlers = { - 'hint_button': self.hint_button, - 'problem_get': self.get_problem, - 'problem_check': self.submit_problem, - 'problem_reset': self.reset_problem, - 'problem_save': self.save_problem, - 'problem_show': self.get_answer, - 'score_update': self.update_score, - 'input_ajax': self.handle_input_ajax, - 'ungraded_response': self.handle_ungraded_response + "hint_button": self.hint_button, + "problem_get": self.get_problem, + "problem_check": self.submit_problem, + "problem_reset": self.reset_problem, + "problem_save": self.save_problem, + "problem_show": self.get_answer, + "score_update": self.update_score, + "input_ajax": self.handle_input_ajax, + "ungraded_response": self.handle_ungraded_response, } _ = self.runtime.service(self, "i18n").gettext @@ -437,12 +440,11 @@ def handle_ajax(self, dispatch, data): ) not_found_error_message = _( - "The state of this problem has changed since you loaded this page. " - "Please refresh your page." + "The state of this problem has changed since you loaded this page. Please refresh your page." ) if dispatch not in handlers: - return f'Error: {dispatch} is not a known capa action' + return f"Error: {dispatch} is not a known capa action" before = self.get_progress() before_attempts = self.attempts @@ -455,7 +457,7 @@ def handle_ajax(self, dispatch, data): "Unable to find data when dispatching %s to %s for user %s", dispatch, self.scope_ids.usage_id, - self.scope_ids.user_id + self.scope_ids.user_id, ) _, _, traceback_obj = sys.exc_info() raise ProcessingError(not_found_error_message).with_traceback(traceback_obj) from ex @@ -465,7 +467,7 @@ def handle_ajax(self, dispatch, data): "Unknown error when dispatching %s to %s for user %s", dispatch, self.scope_ids.usage_id, - self.scope_ids.user_id + self.scope_ids.user_id, ) _, _, traceback_obj = sys.exc_info() raise ProcessingError(generic_error_message).with_traceback(traceback_obj) from ex @@ -475,12 +477,14 @@ def handle_ajax(self, dispatch, data): progress_changed = (after != before) or (after_attempts != before_attempts) curr_score, total_possible = self.get_display_progress() - result.update({ - 'progress_changed': progress_changed, - 'current_score': curr_score, - 'total_possible': total_possible, - 'attempts_used': after_attempts, - }) + result.update( + { + "progress_changed": progress_changed, + "current_score": curr_score, + "total_possible": total_possible, + "attempts_used": after_attempts, + } + ) return json.dumps(result, cls=ComplexEncoder) @@ -520,7 +524,7 @@ def is_grading_method_enabled(self) -> bool: feature is not enabled, the grading method field will not be shown in Studio settings and the default grading method will be used. 
""" - return settings.FEATURES.get('ENABLE_GRADING_METHOD_IN_PROBLEMS', False) + return settings.FEATURES.get("ENABLE_GRADING_METHOD_IN_PROBLEMS", False) @property def debug(self): @@ -529,7 +533,7 @@ def debug(self): the error in Studio. At the same time, in production, we don't want to show errors to students. """ - return getattr(self.runtime, 'is_author_mode', False) or settings.DEBUG + return getattr(self.runtime, "is_author_mode", False) or settings.DEBUG @classmethod def filter_templates(cls, template, course): @@ -539,15 +543,17 @@ def filter_templates(cls, template, course): Show them only if use_latex_compiler is set to True in course settings. """ - return 'latex' not in template['template_id'] or course.use_latex_compiler + return "latex" not in template["template_id"] or course.use_latex_compiler def get_context(self): _context = EditingMixin.get_context(self) - _context.update({ - 'markdown': self.markdown, - 'enable_markdown': self.markdown is not None, - 'enable_latex_compiler': self.use_latex_compiler, - }) + _context.update( + { + "markdown": self.markdown, + "enable_markdown": self.markdown is not None, + "enable_latex_compiler": self.use_latex_compiler, + } + ) return _context # VS[compat] @@ -556,37 +562,38 @@ def get_context(self): @classmethod def backcompat_paths(cls, path): return [ - 'problems/' + path[8:], + "problems/" + path[8:], path[8:], ] @property def non_editable_metadata_fields(self): non_editable_fields = super().non_editable_metadata_fields - non_editable_fields.extend([ - ProblemBlock.due, - ProblemBlock.graceperiod, - ProblemBlock.force_save_button, - ProblemBlock.markdown, - ProblemBlock.use_latex_compiler, - ProblemBlock.show_correctness, - - # Temporarily remove the ability to see MATLAB API key in Studio, as - # a pre-cursor to removing it altogether. - # https://github.com/openedx/public-engineering/issues/192 - ProblemBlock.matlab_api_key, - ]) + non_editable_fields.extend( + [ + ProblemBlock.due, + ProblemBlock.graceperiod, + ProblemBlock.force_save_button, + ProblemBlock.markdown, + ProblemBlock.use_latex_compiler, + ProblemBlock.show_correctness, + # Temporarily remove the ability to see MATLAB API key in Studio, as + # a pre-cursor to removing it altogether. + # https://github.com/openedx/public-engineering/issues/192 + ProblemBlock.matlab_api_key, + ] + ) if not self.is_grading_method_enabled: non_editable_fields.append(ProblemBlock.grading_method) return non_editable_fields @property def problem_types(self): - """ Low-level problem type introspection for content libraries filtering by problem type """ + """Low-level problem type introspection for content libraries filtering by problem type""" try: tree = etree.XML(self.data) except etree.XMLSyntaxError: - log.error(f'Error parsing problem types from xml for capa block {self.display_name}') + log.error(f"Error parsing problem types from xml for capa block {self.display_name}") return None # short-term fix to prevent errors (TNL-5057). Will be more properly addressed in TNL-4525. 
registered_tags = responsetypes.registry.registered_tags() return {node.tag for node in tree.iter() if node.tag in registered_tags} @@ -598,7 +605,7 @@ def index_dictionary(self): xblock_body = super().index_dictionary() # Make optioninput's options index friendly by replacing the actual tag with the values - capa_content = re.sub(r'<optioninput options="\(([^"]+)\)".*?>\s*|\S*<\/optioninput>', r'\1', self.data) + capa_content = re.sub(r'<optioninput options="\(([^"]+)\)".*?>\s*|\S*<\/optioninput>', r"\1", self.data) # Remove the following tags with content that can leak hints or solutions: # - `solution` (with optional attributes) and `solutionset`. @@ -617,20 +624,16 @@ <style.*?>.*?</style> | <[a-z]*hint.*?>.*?</[a-z]*hint> """, - re.DOTALL | - re.VERBOSE), + re.DOTALL | re.VERBOSE, + ), "", - capa_content + capa_content, ) # Strip out all other tags, leaving their content. But we want spaces between adjacent tags, so that #
<choice correct="true"><div>Option A</div></choice><choice correct="false"><div>Option B</div></choice>
# becomes "Option A Option B" not "Option AOption B" (these will appear in search results) capa_content = re.sub(r"<([^>]+)>", r" <\1>", capa_content) - capa_content = re.sub( - r"(\s|&nbsp;|//)+", - " ", - nh3.clean(capa_content, tags=set()) - ).strip() + capa_content = re.sub(r"(\s|&nbsp;|//)+", " ", nh3.clean(capa_content, tags=set())).strip() capa_body = { "capa_content": capa_content, @@ -653,8 +656,7 @@ def has_support(self, view, functionality): if functionality == "multi_device": types = self.problem_types # Avoid calculating this property twice return types is not None and all( - responsetypes.registry.get_class_for_tag(tag).multi_device_support - for tag in types + responsetypes.registry.get_class_for_tag(tag).multi_device_support for tag in types ) return False @@ -712,7 +714,7 @@ def generate_report_data(self, user_state_iterator, limit_responses=None): "Answer ID": "98e6a8e915904d5389821a94e48babcf_10_1" }) """ - if self.category != 'problem': + if self.category != "problem": raise NotImplementedError() if limit_responses == 0: @@ -743,7 +745,7 @@ count = 0 for user_state in user_state_iterator: - if 'student_answers' not in user_state.state: + if "student_answers" not in user_state.state: continue try: lcp = LoncapaProblem( @@ -753,14 +755,14 @@ # We choose to run without a fully initialized CapaModule capa_block=None, state={ - 'done': user_state.state.get('done'), - 'correct_map': user_state.state.get('correct_map'), - 'student_answers': user_state.state.get('student_answers'), - 'has_saved_answers': user_state.state.get('has_saved_answers'), - 'input_state': user_state.state.get('input_state'), - 'seed': user_state.state.get('seed'), + "done": user_state.state.get("done"), + "correct_map": user_state.state.get("correct_map"), + "student_answers": user_state.state.get("student_answers"), + "has_saved_answers": user_state.state.get("has_saved_answers"), + "input_state": user_state.state.get("input_state"), + "seed": user_state.state.get("seed"), }, - seed=user_state.state.get('seed'), + seed=user_state.state.get("seed"), # extract_tree=False allows us to work without a fully initialized CapaModule # We'll still be able to find particular data in the XML when we need it extract_tree=False, ) @@ -770,7 +772,7 @@ # Some types of problems have data in lcp.student_answers that isn't in lcp.problem_data. # E.g. formulae do this to store the MathML version of the answer. # We exclude these rows from the report because we only need the text-only answer. 
- if answer_id.endswith('_dynamath'): + if answer_id.endswith("_dynamath"): continue if limit_responses and count >= limit_responses: @@ -794,8 +796,9 @@ def generate_report_data(self, user_state_iterator, limit_responses=None): # Capture a backtrace for errors from failed loncapa problems log.exception( "An error occurred generating a problem report on course %s, problem %s, and student %s", - self.course_id, self.scope_ids.usage_id, - self.scope_ids.user_id + self.course_id, + self.scope_ids.usage_id, + self.scope_ids.user_id, ) # Also input error in report report = { @@ -812,8 +815,8 @@ def course_end_date(self): """ try: - course_block_key = self.runtime.course_entry.structure['root'] - return self.runtime.course_entry.structure['blocks'][course_block_key].fields['end'] + course_block_key = self.runtime.course_entry.structure["root"] + return self.runtime.course_entry.structure["blocks"][course_block_key].fields["end"] except (AttributeError, KeyError): return None @@ -843,8 +846,7 @@ def lcp(self): # lint-amnesty, pylint: disable=method-hidden, missing-function- try: lcp = self.new_lcp(self.get_state_for_lcp()) except Exception as err: # pylint: disable=broad-except - msg = 'cannot create LoncapaProblem {loc}: {err}'.format( - loc=str(self.location), err=err) + msg = "cannot create LoncapaProblem {loc}: {err}".format(loc=str(self.location), err=err) raise LoncapaProblemError(msg).with_traceback(sys.exc_info()[2]) if self.score is None: @@ -860,11 +862,11 @@ def choose_new_seed(self): if self.rerandomize == RANDOMIZATION.NEVER: self.seed = 1 elif self.rerandomize == RANDOMIZATION.PER_STUDENT: - user_id = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(ATTR_KEY_USER_ID) or 0 + user_id = self.runtime.service(self, "user").get_current_user().opt_attrs.get(ATTR_KEY_USER_ID) or 0 # see comment on randomization_bin - self.seed = randomization_bin(user_id, str(self.location).encode('utf-8')) + self.seed = randomization_bin(user_id, str(self.location).encode("utf-8")) else: - self.seed = struct.unpack('i', os.urandom(4))[0] + self.seed = struct.unpack("i", os.urandom(4))[0] # So that sandboxed code execution can be cached, but still have an interesting # number of possibilities, cap the number of different random seeds. @@ -877,14 +879,14 @@ def new_lcp(self, state, text=None): if text is None: text = self.data - user_service = self.runtime.service(self, 'user') + user_service = self.runtime.service(self, "user") anonymous_student_id = user_service.get_current_user().opt_attrs.get(ATTR_KEY_DEPRECATED_ANONYMOUS_USER_ID) seed = user_service.get_current_user().opt_attrs.get(ATTR_KEY_USER_ID) or 0 - sandbox_service = self.runtime.service(self, 'sandbox') - cache_service = self.runtime.service(self, 'cache') + sandbox_service = self.runtime.service(self, "sandbox") + cache_service = self.runtime.service(self, "cache") - is_studio = getattr(self.runtime, 'is_author_mode', False) + is_studio = getattr(self.runtime, "is_author_mode", False) capa_system = LoncapaSystem( ajax_url=self.ajax_url, @@ -894,11 +896,11 @@ def new_lcp(self, state, text=None): get_python_lib_zip=sandbox_service.get_python_lib_zip, DEBUG=self.debug, i18n=self.runtime.service(self, "i18n"), - render_template=self.runtime.service(self, 'mako').render_template, + render_template=self.runtime.service(self, "mako").render_template, resources_fs=self.runtime.resources_fs, seed=seed, # Why do we do this if we have self.seed? 
xqueue=None if is_studio else XQueueService(self), - matlab_api_key=self.matlab_api_key + matlab_api_key=self.matlab_api_key, ) return LoncapaProblem( @@ -915,13 +917,13 @@ def get_state_for_lcp(self): Give a dictionary holding the state of the module """ return { - 'done': self.done, - 'correct_map': self.correct_map, - 'correct_map_history': self.correct_map_history, - 'student_answers': self.student_answers, - 'has_saved_answers': self.has_saved_answers, - 'input_state': self.input_state, - 'seed': self.get_seed(), + "done": self.done, + "correct_map": self.correct_map, + "correct_map_history": self.correct_map_history, + "student_answers": self.student_answers, + "has_saved_answers": self.has_saved_answers, + "input_state": self.input_state, + "seed": self.get_seed(), } def set_state_from_lcp(self): @@ -929,12 +931,12 @@ def set_state_from_lcp(self): Set the module's state from the settings in `self.lcp` """ lcp_state = self.lcp.get_state() - self.done = lcp_state['done'] - self.correct_map = lcp_state['correct_map'] - self.correct_map_history = lcp_state['correct_map_history'] - self.input_state = lcp_state['input_state'] - self.student_answers = lcp_state['student_answers'] - self.has_saved_answers = lcp_state['has_saved_answers'] + self.done = lcp_state["done"] + self.correct_map = lcp_state["correct_map"] + self.correct_map_history = lcp_state["correct_map_history"] + self.input_state = lcp_state["input_state"] + self.student_answers = lcp_state["student_answers"] + self.has_saved_answers = lcp_state["has_saved_answers"] def set_last_submission_time(self): """ @@ -976,7 +978,7 @@ def get_display_progress(self): Return (score, total) to be displayed to the learner. """ progress = self.get_progress() - score, total = (progress.frac() if progress else (0, 0)) + score, total = progress.frac() if progress else (0, 0) # Withhold the score if hiding correctness if not self.correctness_available(): @@ -990,28 +992,30 @@ def get_html(self): """ curr_score, total_possible = self.get_display_progress() - return self.runtime.service(self, 'mako').render_lms_template('problem_ajax.html', { - 'element_id': self.location.html_id(), - 'id': str(self.location), - 'ajax_url': self.ajax_url, - 'current_score': curr_score, - 'total_possible': total_possible, - 'attempts_used': self.attempts, - 'content': self.get_problem_html(encapsulate=False), - 'graded': self.graded, # pylint: disable=no-member - }) + return self.runtime.service(self, "mako").render_lms_template( + "problem_ajax.html", + { + "element_id": self.location.html_id(), + "id": str(self.location), + "ajax_url": self.ajax_url, + "current_score": curr_score, + "total_possible": total_possible, + "attempts_used": self.attempts, + "content": self.get_problem_html(encapsulate=False), + "graded": self.graded, # pylint: disable=no-member + }, + ) def handle_fatal_lcp_error(self, error): # lint-amnesty, pylint: disable=missing-function-docstring log.exception(f"LcpFatalError Encountered for {str(self.location)}") if error: - return( - HTML('
<p>Error formatting HTML for problem:</p><p><pre style="color:red">{msg}</pre></p>').format( - msg=str(error)) + return HTML('<p>Error formatting HTML for problem:</p><p><pre style="color:red">{msg}</pre></p>').format( + msg=str(error) ) else: return HTML( - '
<p>Could not format HTML for problem. ' - 'Contact course staff in the discussion forum for assistance.</p>' + "<p>Could not format HTML for problem. " + "Contact course staff in the discussion forum for assistance.</p>
                    " ) def submit_button_name(self): @@ -1021,7 +1025,7 @@ def submit_button_name(self): # The logic flow is a little odd so that _('xxx') strings can be found for # translation while also running _() just once for each string. _ = self.runtime.service(self, "i18n").gettext - submit = _('Submit') + submit = _("Submit") return submit @@ -1034,13 +1038,13 @@ def submit_button_submitting_name(self): received by the server. """ _ = self.runtime.service(self, "i18n").gettext - return _('Submitting') + return _("Submitting") def should_enable_submit_button(self): """ Return True/False to indicate whether to enable the "Submit" button. """ - submitted_without_reset = (self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS) + submitted_without_reset = self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS # If the problem is closed (past due / too many attempts) # then we disable the "submit" button @@ -1055,7 +1059,7 @@ def should_show_reset_button(self): """ Return True/False to indicate whether to show the "Reset" button. """ - is_survey_question = (self.max_attempts == 0) + is_survey_question = self.max_attempts == 0 # If the problem is closed (and not a survey question with max_attempts==0), # then do NOT show the reset button. @@ -1083,7 +1087,7 @@ def should_show_save_button(self): if self.force_save_button: return not self.closed() else: - is_survey_question = (self.max_attempts == 0) + is_survey_question = self.max_attempts == 0 needs_reset = self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS # If the student has unlimited attempts, and their answers @@ -1121,22 +1125,14 @@ def handle_problem_html_error(self, err): """ problem_display_name = self.display_name_with_default problem_location = str(self.location) - log.exception( - "ProblemGetHtmlError: %r, %r, %s", - problem_display_name, - problem_location, - str(err) - ) + log.exception("ProblemGetHtmlError: %r, %r, %s", problem_display_name, problem_location, str(err)) if self.debug: - msg = HTML( - '[courseware.capa.capa_block] ' - 'Failed to generate HTML for problem {url}' - ).format( + msg = HTML("[courseware.capa.capa_block] Failed to generate HTML for problem {url}").format( url=str(self.location) ) - msg += HTML('
<p>Error:</p><p><pre>{msg}</pre></p>').format(msg=str(err))
-            msg += HTML('<p><pre>{tb}</pre></p>').format(tb=traceback.format_exc())
+            msg += HTML("<p>Error:</p><p><pre>{msg}</pre></p>").format(msg=str(err))
+            msg += HTML("<p><pre>{tb}</pre></p>
                    ").format(tb=traceback.format_exc()) html = msg else: @@ -1152,7 +1148,7 @@ def handle_problem_html_error(self, err): # Some inputtypes, such as dynamath, have additional "hidden" state that # is not exposed to the student. Keep those hidden # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id - hidden_state_keywords = ['dynamath'] + hidden_state_keywords = ["dynamath"] for answer_id in answer_ids: for hidden_state_keyword in hidden_state_keywords: if answer_id.find(hidden_state_keyword) >= 0: @@ -1169,14 +1165,14 @@ def handle_problem_html_error(self, err): # Translators: Following this message, there will be a bulleted list of items. warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:") - warning += HTML('{}
<ul>').format(warning_msg)
+                warning += HTML("{}<ul>").format(warning_msg)

                 for student_answer in student_answers.values():
-                    if student_answer != '':
-                        warning += HTML('<li>{}</li>').format(student_answer)
+                    if student_answer != "":
+                        warning += HTML("<li>{}</li>").format(student_answer)

-                warning_msg = _('If this error persists, please contact the course staff.')
-                warning += HTML('</ul>{}</div>').format(warning_msg)
+                warning_msg = _("If this error persists, please contact the course staff.")
+                warning += HTML("</ul>{}</div>
                    ").format(warning_msg) html = warning try: @@ -1187,7 +1183,7 @@ def handle_problem_html_error(self, err): "ProblemGetHtmlError: Unable to generate html from LoncapaProblem: %r, %r, %s", problem_display_name, problem_location, - str(error) + str(error), ) raise @@ -1225,15 +1221,15 @@ def get_demand_hint(self, hint_index): _ = self.runtime.service(self, "i18n").gettext counter = 0 - total_text = '' + total_text = "" while counter <= hint_index: # Translators: {previous_hints} is the HTML of hints that have already been generated, {hint_number_prefix} # is a header for this hint, and {hint_text} is the text of the hint itself. # This string is being passed to translation only for possible reordering of the placeholders. - total_text = HTML(_('{previous_hints}{list_start_tag}{strong_text}{hint_text}')).format( + total_text = HTML(_("{previous_hints}{list_start_tag}{strong_text}{hint_text}")).format( previous_hints=HTML(total_text), list_start_tag=HTML('
<li class="hint-index-{counter}" tabindex="-1">').format(counter=counter),
-                strong_text=HTML('<strong>{hint_number_prefix}</strong>').format(
+                strong_text=HTML("<strong>{hint_number_prefix}</strong>").format(
                     # Translators: e.g. "Hint 1 of 3: " meaning we are showing the first of three hints.
                     # This text is shown in bold before the accompanying hint text.
                     hint_number_prefix=Text(_("Hint ({hint_num} of {hints_count}): ")).format(
                         hint_num=counter + 1, hints_count=len(demand_hints)
                     )
                 ),
                 # Course-authored HTML demand hints are supported.
-                hint_text=HTML(self.runtime.service(self, "replace_urls").replace_urls(
-                    get_inner_html_from_xpath(demand_hints[counter])
-                ))
+                hint_text=HTML(
+                    self.runtime.service(self, "replace_urls").replace_urls(
+                        get_inner_html_from_xpath(demand_hints[counter])
+                    )
+                ),
             )
             counter += 1

-        total_text = HTML('<ol>{hints}</ol>').format(hints=total_text)
+        total_text = HTML("<ol>{hints}</ol>
                    ").format(hints=total_text) # Log this demand-hint request. Note that this only logs the last hint requested (although now # all previously shown hints are still displayed). event_info = {} - event_info['module_id'] = str(self.location) - event_info['hint_index'] = hint_index - event_info['hint_len'] = len(demand_hints) - event_info['hint_text'] = get_inner_html_from_xpath(demand_hints[hint_index]) - self.runtime.publish(self, 'edx.problem.hint.demandhint_displayed', event_info) + event_info["module_id"] = str(self.location) + event_info["hint_index"] = hint_index + event_info["hint_len"] = len(demand_hints) + event_info["hint_text"] = get_inner_html_from_xpath(demand_hints[hint_index]) + self.runtime.publish(self, "edx.problem.hint.demandhint_displayed", event_info) _, should_enable_next_hint = self._should_enable_demand_hint(demand_hints=demand_hints, hint_index=hint_index) # We report the index of this hint, the client works out what index to use to get the next hint return { - 'success': True, - 'hint_index': hint_index, - 'should_enable_next_hint': should_enable_next_hint, - 'msg': total_text, + "success": True, + "hint_index": hint_index, + "should_enable_next_hint": should_enable_next_hint, + "msg": total_text, } def get_problem_html(self, encapsulate=True, submit_notification=False): @@ -1297,12 +1295,12 @@ def get_problem_html(self, encapsulate=True, submit_notification=False): if not should_enable_submit_button: cta_service = self.runtime.service(self, "call_to_action") if cta_service: - submit_disabled_ctas = cta_service.get_ctas(self, 'capa_submit_disabled') + submit_disabled_ctas = cta_service.get_ctas(self, "capa_submit_disabled") content = { - 'name': self.display_name_with_default, - 'html': smart_str(html), - 'weight': self.weight, + "name": self.display_name_with_default, + "html": smart_str(html), + "weight": self.weight, } # If demand hints are available, emit hint button and div. @@ -1310,37 +1308,38 @@ def get_problem_html(self, encapsulate=True, submit_notification=False): demand_hint_possible, should_enable_next_hint = self._should_enable_demand_hint(demand_hints=demand_hints) answer_notification_type, answer_notification_message = self._get_answer_notification( - render_notifications=submit_notification) + render_notifications=submit_notification + ) save_message = None if self.has_saved_answers: - save_message = _( - "Your answers were previously saved. Click '{button_name}' to grade them." - ).format(button_name=self.submit_button_name()) + save_message = _("Your answers were previously saved. 
Click '{button_name}' to grade them.").format( + button_name=self.submit_button_name() + ) context = { - 'problem': content, - 'id': str(self.location), - 'short_id': self.location.html_id(), - 'submit_button': submit_button, - 'submit_button_submitting': submit_button_submitting, - 'should_enable_submit_button': should_enable_submit_button, - 'reset_button': self.should_show_reset_button(), - 'save_button': self.should_show_save_button(), - 'answer_available': self.answer_available(), - 'grading_method': self.grading_method_display_name(), - 'attempts_used': self.attempts, - 'attempts_allowed': self.max_attempts, - 'demand_hint_possible': demand_hint_possible, - 'should_enable_next_hint': should_enable_next_hint, - 'answer_notification_type': answer_notification_type, - 'answer_notification_message': answer_notification_message, - 'has_saved_answers': self.has_saved_answers, - 'save_message': save_message, - 'submit_disabled_cta': submit_disabled_ctas[0] if submit_disabled_ctas else None, + "problem": content, + "id": str(self.location), + "short_id": self.location.html_id(), + "submit_button": submit_button, + "submit_button_submitting": submit_button_submitting, + "should_enable_submit_button": should_enable_submit_button, + "reset_button": self.should_show_reset_button(), + "save_button": self.should_show_save_button(), + "answer_available": self.answer_available(), + "grading_method": self.grading_method_display_name(), + "attempts_used": self.attempts, + "attempts_allowed": self.max_attempts, + "demand_hint_possible": demand_hint_possible, + "should_enable_next_hint": should_enable_next_hint, + "answer_notification_type": answer_notification_type, + "answer_notification_message": answer_notification_message, + "has_saved_answers": self.has_saved_answers, + "save_message": save_message, + "submit_disabled_cta": submit_disabled_ctas[0] if submit_disabled_ctas else None, } - html = self.runtime.service(self, 'mako').render_lms_template('problem.html', context) + html = self.runtime.service(self, "mako").render_lms_template("problem.html", context) if encapsulate: html = HTML('
<div id="problem_{id}" class="problem" data-url="{ajax_url}">{html}</div>
                    ').format( @@ -1369,7 +1368,7 @@ def _get_answer_notification(self, render_notifications): # Show only a generic message if hiding correctness if not self.correctness_available(): - answer_notification_type = 'submitted' + answer_notification_type = "submitted" elif len(id_list) == 1: # Only one answer available answer_notification_type = self.lcp.correct_map.get_correctness(id_list[0]) @@ -1381,40 +1380,36 @@ def _get_answer_notification(self, render_notifications): # There is at least 1 of the following combinations of correctness states # Correct and incorrect, Correct and partially correct, or Incorrect and partially correct # which all should have a message type of Partially Correct - answer_notification_type = 'partially-correct' + answer_notification_type = "partially-correct" break # Build the notification message based on the notification type and translate it. ngettext = self.runtime.service(self, "i18n").ngettext _ = self.runtime.service(self, "i18n").gettext - if answer_notification_type == 'incorrect': + if answer_notification_type == "incorrect": if progress is not None: answer_notification_message = ngettext( - "Incorrect ({progress} point)", - "Incorrect ({progress} points)", - progress.frac()[1] + "Incorrect ({progress} point)", "Incorrect ({progress} points)", progress.frac()[1] ).format(progress=str(progress)) else: - answer_notification_message = _('Incorrect') - elif answer_notification_type == 'correct': + answer_notification_message = _("Incorrect") + elif answer_notification_type == "correct": if progress is not None: answer_notification_message = ngettext( - "Correct ({progress} point)", - "Correct ({progress} points)", - progress.frac()[1] + "Correct ({progress} point)", "Correct ({progress} points)", progress.frac()[1] ).format(progress=str(progress)) else: - answer_notification_message = _('Correct') - elif answer_notification_type == 'partially-correct': + answer_notification_message = _("Correct") + elif answer_notification_type == "partially-correct": if progress is not None: answer_notification_message = ngettext( "Partially correct ({progress} point)", "Partially correct ({progress} points)", - progress.frac()[1] + progress.frac()[1], ).format(progress=str(progress)) else: - answer_notification_message = _('Partially Correct') - elif answer_notification_type == 'submitted': + answer_notification_message = _("Partially Correct") + elif answer_notification_type == "submitted": answer_notification_message = _("Answer submitted.") return answer_notification_type, answer_notification_message @@ -1424,11 +1419,24 @@ def remove_tags_from_html(self, html): The capa xml includes many tags such as or which are not meant to be part of the client html. We strip them all and return the resulting html. 
""" - tags = ['demandhint', 'choicehint', 'optionhint', 'stringhint', 'numerichint', 'optionhint', - 'correcthint', 'regexphint', 'additional_answer', 'stringequalhint', 'compoundhint', - 'stringequalhint'] + tags = [ + "demandhint", + "choicehint", + "optionhint", + "stringhint", + "numerichint", + "optionhint", + "correcthint", + "regexphint", + "additional_answer", + "stringequalhint", + "compoundhint", + "stringequalhint", + ] for tag in tags: - html = re.sub(fr'<{tag}.*?>.*?', '', html, flags=re.DOTALL) # xss-lint: disable=python-interpolate-html # lint-amnesty, pylint: disable=line-too-long + html = re.sub( + rf"<{tag}.*?>.*?", "", html, flags=re.DOTALL + ) # xss-lint: disable=python-interpolate-html # lint-amnesty, pylint: disable=line-too-long # Some of these tags span multiple lines # Note: could probably speed this up by calling sub() once with a big regex # vs. simply calling sub() many times as we have here. @@ -1438,19 +1446,18 @@ def hint_button(self, data): """ Hint button handler, returns new html using hint_index from the client. """ - hint_index = int(data['hint_index']) + hint_index = int(data["hint_index"]) return self.get_demand_hint(hint_index) def used_all_attempts(self): - """ All attempts have been used """ + """All attempts have been used""" return self.max_attempts is not None and self.attempts >= self.max_attempts def is_past_due(self): """ Is it now past this problem's due date, including grace period? """ - return (self.close_date is not None and - datetime.datetime.now(utc) > self.close_date) + return self.close_date is not None and datetime.datetime.now(utc) > self.close_date def closed(self): """ @@ -1494,11 +1501,11 @@ def answer_available(self): """ Is the user allowed to see an answer? """ - user_is_staff = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) + user_is_staff = self.runtime.service(self, "user").get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) if not self.correctness_available(): # If correctness is being withheld, then don't show answers either. return False - elif self.showanswer == '': + elif self.showanswer == "": return False elif self.showanswer == SHOWANSWER.NEVER: return False @@ -1542,7 +1549,7 @@ def correctness_available(self): Limits access to the correct/incorrect flags, messages, and problem score. """ - user_is_staff = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) + user_is_staff = self.runtime.service(self, "user").get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) return ShowCorrectness.correctness_available( show_correctness=self.show_correctness, due_date=self.close_date, @@ -1559,8 +1566,8 @@ def update_score(self, data): No ajax return is needed. Return empty dict. """ - queuekey = data['queuekey'] - score_msg = data['xqueue_body'] + queuekey = data["queuekey"] + score_msg = data["xqueue_body"] self.lcp.update_score(score_msg, queuekey) self.set_state_from_lcp() self.set_score(self.score_from_lcp(self.lcp)) @@ -1583,8 +1590,8 @@ def handle_ungraded_response(self, data): No ajax return is needed, so an empty dict is returned """ - queuekey = data['queuekey'] - score_msg = data['xqueue_body'] + queuekey = data["queuekey"] + score_msg = data["xqueue_body"] # pass along the xqueue message to the problem self.lcp.ungraded_response(score_msg, queuekey) @@ -1618,10 +1625,10 @@ def get_answer(self, _data): (and also screen reader text). 
""" event_info = {} - event_info['problem_id'] = str(self.location) - self.publish_unmasked('showanswer', event_info) + event_info["problem_id"] = str(self.location) + self.publish_unmasked("showanswer", event_info) if not self.answer_available(): # lint-amnesty, pylint: disable=no-else-raise - raise NotFoundError('Answer is not available') + raise NotFoundError("Answer is not available") else: answers = self.lcp.get_question_answers() self.set_state_from_lcp() @@ -1634,17 +1641,15 @@ def get_answer(self, _data): answer_content = self.runtime.service(self, "replace_urls").replace_urls(answers[answer_id]) new_answer = {answer_id: answer_content} except TypeError: - log.debug('Unable to perform URL substitution on answers[%s]: %s', - answer_id, answers[answer_id]) + log.debug("Unable to perform URL substitution on answers[%s]: %s", answer_id, answers[answer_id]) new_answer = {answer_id: answers[answer_id]} new_answers.update(new_answer) return { - 'answers': new_answers, - 'correct_status_html': self.runtime.service(self, 'mako').render_lms_template( - 'status_span.html', - {'status': Status('correct', self.runtime.service(self, "i18n").gettext)} - ) + "answers": new_answers, + "correct_status_html": self.runtime.service(self, "mako").render_lms_template( + "status_span.html", {"status": Status("correct", self.runtime.service(self, "i18n").gettext)} + ), } # Figure out if we should move these to capa_problem? @@ -1656,7 +1661,7 @@ def get_problem(self, _data): Used if we want to reconfirm we have the right thing e.g. after several AJAX calls. """ - return {'html': self.get_problem_html(encapsulate=False, submit_notification=True)} + return {"html": self.get_problem_html(encapsulate=False, submit_notification=True)} @staticmethod def make_dict_of_responses(data): @@ -1700,7 +1705,7 @@ def make_dict_of_responses(data): # We only want to consider each key a single time, so we use set(data.keys()) for key in set(data.keys()): # e.g. input_resistor_1 ==> resistor_1 - _, _, name = key.partition('_') + _, _, name = key.partition("_") # If key has no underscores, then partition # will return (key, '', '') @@ -1715,8 +1720,8 @@ def make_dict_of_responses(data): # answer will be an array. # if the name ends with '{}' (Which looks like a dict), # then the answer will be a dict - is_list_key = name.endswith('[]') - is_dict_key = name.endswith('{}') + is_list_key = name.endswith("[]") + is_dict_key = name.endswith("{}") name = name[:-2] if is_list_key or is_dict_key else name if is_list_key: @@ -1725,7 +1730,7 @@ def make_dict_of_responses(data): try: val = json.loads(data[key]) # If the submission wasn't deserializable, raise an error. 
- except(KeyError, ValueError): + except (KeyError, ValueError): raise ValueError( # lint-amnesty, pylint: disable=raise-missing-from f"Invalid submission: {data[key]} for {key}" ) @@ -1748,16 +1753,16 @@ def publish_grade(self, score=None, only_if_higher=None, **kwargs): if not score: score = self.score event = { - 'value': score.raw_earned, - 'max_value': score.raw_possible, - 'only_if_higher': only_if_higher, + "value": score.raw_earned, + "max_value": score.raw_possible, + "only_if_higher": only_if_higher, } - if kwargs.get('grader_response'): - event['grader_response'] = kwargs['grader_response'] + if kwargs.get("grader_response"): + event["grader_response"] = kwargs["grader_response"] - self.runtime.publish(self, 'grade', event) + self.runtime.publish(self, "grade", event) - return {'grade': self.score.raw_earned, 'max_grade': self.score.raw_possible} + return {"grade": self.score.raw_earned, "max_grade": self.score.raw_possible} # pylint: disable=too-many-statements def submit_problem(self, data, override_time=False): @@ -1769,16 +1774,16 @@ def submit_problem(self, data, override_time=False): 'contents' : html} """ event_info = {} - event_info['state'] = self.lcp.get_state() - event_info['problem_id'] = str(self.location) + event_info["state"] = self.lcp.get_state() + event_info["problem_id"] = str(self.location) self.lcp.has_saved_answers = False answers = self.make_dict_of_responses(data) answers_without_files = convert_files_to_filenames(answers) self.student_answers_history.append(answers_without_files) - event_info['answers'] = answers_without_files + event_info["answers"] = answers_without_files - metric_name = 'xmodule.capa.check_problem.{}'.format # lint-amnesty, pylint: disable=unused-variable + metric_name = "xmodule.capa.check_problem.{}".format # lint-amnesty, pylint: disable=unused-variable # Can override current time current_time = datetime.datetime.now(utc) if override_time is not False: @@ -1789,7 +1794,7 @@ def submit_problem(self, data, override_time=False): # Too late. Cannot submit if self.closed(): log.error( - 'ProblemClosedError: Problem %s, close date: %s, due:%s, is_past_due: %s, attempts: %s/%s,', + "ProblemClosedError: Problem %s, close date: %s, due:%s, is_past_due: %s, attempts: %s/%s,", str(self.location), self.close_date, self.due, @@ -1797,14 +1802,14 @@ def submit_problem(self, data, override_time=False): self.attempts, self.max_attempts, ) - event_info['failure'] = 'closed' - self.publish_unmasked('problem_check_fail', event_info) + event_info["failure"] = "closed" + self.publish_unmasked("problem_check_fail", event_info) raise NotFoundError(_("Problem is closed.")) # Problem submitted. Student should reset before checking again if self.done and self.rerandomize == RANDOMIZATION.ALWAYS: - event_info['failure'] = 'unreset' - self.publish_unmasked('problem_check_fail', event_info) + event_info["failure"] = "unreset" + self.publish_unmasked("problem_check_fail", event_info) raise NotFoundError(_("Problem must be reset before it can be submitted again.")) # Problem queued. 
Students must wait a specified waittime before they are allowed to submit @@ -1816,26 +1821,25 @@ def submit_problem(self, data, override_time=False): waittime_between_requests = xqueue_service.waittime if xqueue_service else 0 if (current_time - prev_submit_time).total_seconds() < waittime_between_requests: msg = _("You must wait at least {wait} seconds between submissions.").format( - wait=waittime_between_requests) - return {'success': msg, 'html': ''} + wait=waittime_between_requests + ) + return {"success": msg, "html": ""} # Wait time between resets: check if is too soon for submission. if self.last_submission_time is not None and self.submission_wait_seconds not in [0, None]: seconds_since_submission = (current_time - self.last_submission_time).total_seconds() if seconds_since_submission < self.submission_wait_seconds: remaining_secs = int(self.submission_wait_seconds - seconds_since_submission) - msg = _('You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format( + msg = _("You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.").format( wait_secs=self.pretty_print_seconds(self.submission_wait_seconds), - remaining_secs=self.pretty_print_seconds(remaining_secs)) - return { - 'success': msg, - 'html': '' - } + remaining_secs=self.pretty_print_seconds(remaining_secs), + ) + return {"success": msg, "html": ""} try: # expose the attempt number to a potential python custom grader # self.lcp.context['attempt'] refers to the attempt number (1-based) - self.lcp.context['attempt'] = self.attempts + 1 + self.lcp.context["attempt"] = self.attempts + 1 correct_map = self.lcp.grade_answers(answers) # self.attempts refers to the number of attempts that did not # raise an error (0-based) @@ -1852,10 +1856,7 @@ def submit_problem(self, data, override_time=False): except (StudentInputError, ResponseError, LoncapaProblemError) as inst: if self.debug: - log.warning( - "StudentInputError in capa_block:problem_check", - exc_info=True - ) + log.warning("StudentInputError in capa_block:problem_check", exc_info=True) # Save the user's state before failing self.set_state_from_lcp() @@ -1864,7 +1865,7 @@ def submit_problem(self, data, override_time=False): # If the user is a staff member, include # the full exception, including traceback, # in the response - user_is_staff = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) + user_is_staff = self.runtime.service(self, "user").get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) if user_is_staff: msg = f"Staff debug info: {traceback.format_exc()}" @@ -1878,7 +1879,7 @@ def submit_problem(self, data, override_time=False): except IndexError: msg = full_error - return {'success': msg} + return {"success": msg} except Exception as err: # Save the user's state before failing @@ -1887,38 +1888,36 @@ def submit_problem(self, data, override_time=False): if self.debug: msg = f"Error checking problem: {str(err)}" - msg += f'\nTraceback:\n{traceback.format_exc()}' - return {'success': msg} + msg += f"\nTraceback:\n{traceback.format_exc()}" + return {"success": msg} raise published_grade = self.publish_grade() # success = correct if ALL questions in this problem are correct - success = 'correct' + success = "correct" for answer_id in correct_map: if not correct_map.is_correct(answer_id): - success = 'incorrect' + success = "incorrect" # NOTE: We are logging both full grading and queued-grading submissions. 
In the latter, # 'success' will always be incorrect - event_info['grade'] = published_grade['grade'] - event_info['max_grade'] = published_grade['max_grade'] - event_info['correct_map'] = correct_map.get_dict() - event_info['success'] = success - event_info['attempts'] = self.attempts - event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map) - self.publish_unmasked('problem_check', event_info) + event_info["grade"] = published_grade["grade"] + event_info["max_grade"] = published_grade["max_grade"] + event_info["correct_map"] = correct_map.get_dict() + event_info["success"] = success + event_info["attempts"] = self.attempts + event_info["submission"] = self.get_submission_metadata_safe(answers_without_files, correct_map) + self.publish_unmasked("problem_check", event_info) # render problem into HTML html = self.get_problem_html(encapsulate=False, submit_notification=True) # Withhold success indicator if hiding correctness if not self.correctness_available(): - success = 'submitted' + success = "submitted" + + return {"success": success, "contents": html} - return { - 'success': success, - 'contents': html - } # pylint: enable=too-many-statements def get_score_with_grading_method(self, current_score: Score) -> Score: @@ -1968,32 +1967,32 @@ def unmask_event(self, event_info): # but check for the existence of the things we need to un-mask. # Look for answers/id - answer = event_info.get('answers', {}).get(response.answer_id) + answer = event_info.get("answers", {}).get(response.answer_id) if answer is not None: - event_info['answers'][response.answer_id] = response.unmask_name(answer) + event_info["answers"][response.answer_id] = response.unmask_name(answer) # Look for state/student_answers/id - answer = event_info.get('state', {}).get('student_answers', {}).get(response.answer_id) + answer = event_info.get("state", {}).get("student_answers", {}).get(response.answer_id) if answer is not None: - event_info['state']['student_answers'][response.answer_id] = response.unmask_name(answer) + event_info["state"]["student_answers"][response.answer_id] = response.unmask_name(answer) # Look for old_state/student_answers/id -- parallel to the above case, happens on reset - answer = event_info.get('old_state', {}).get('student_answers', {}).get(response.answer_id) + answer = event_info.get("old_state", {}).get("student_answers", {}).get(response.answer_id) if answer is not None: - event_info['old_state']['student_answers'][response.answer_id] = response.unmask_name(answer) + event_info["old_state"]["student_answers"][response.answer_id] = response.unmask_name(answer) # Add 'permutation' to event_info for permuted responses. 
permutation_option = None if response.has_shuffle(): - permutation_option = 'shuffle' + permutation_option = "shuffle" elif response.has_answerpool(): - permutation_option = 'answerpool' + permutation_option = "answerpool" if permutation_option is not None: # Add permutation record tuple: (one of:'shuffle'/'answerpool', [as-displayed list]) - if 'permutation' not in event_info: - event_info['permutation'] = {} - event_info['permutation'][response.answer_id] = (permutation_option, response.unmask_order()) + if "permutation" not in event_info: + event_info["permutation"] = {} + event_info["permutation"][response.answer_id] = (permutation_option, response.unmask_order()) def pretty_print_seconds(self, num_seconds): """ @@ -2032,7 +2031,7 @@ def get_submission_metadata_safe(self, answers, correct_map): # NOTE: The above process requires deep inspection of capa structures that may break for some # uncommon problem types. Ensure that it does not prevent answer submission in those # cases. Any occurrences of errors in this block should be investigated and resolved. - log.exception('Unable to gather submission metadata, it will not be included in the event.') + log.exception("Unable to gather submission metadata, it will not be included in the event.") return {} @@ -2065,7 +2064,7 @@ def get_submission_metadata(self, answers, correct_map): answer_input = self.lcp.inputs.get(input_id) if answer_input is None: - log.warning('Input id %s is not mapped to an input type.', input_id) + log.warning("Input id %s is not mapped to an input type.", input_id) answer_response = None for responder in self.lcp.responders.values(): @@ -2073,10 +2072,10 @@ def get_submission_metadata(self, answers, correct_map): answer_response = responder if answer_response is None: - log.warning('Answer responder could not be found for input_id %s.', input_id) + log.warning("Answer responder could not be found for input_id %s.", input_id) user_visible_answer = internal_answer - if hasattr(answer_input, 'get_user_visible_answer'): + if hasattr(answer_input, "get_user_visible_answer"): user_visible_answer = answer_input.get_user_visible_answer(internal_answer) # If this problem has rerandomize enabled, then it will generate N variants of the @@ -2084,23 +2083,23 @@ def get_submission_metadata(self, answers, correct_map): # variant was selected. Ideally it would be nice to have the exact question that # was presented to the user, with values interpolated etc, but that can be done # later if necessary. 
- variant = '' + variant = "" if self.rerandomize != RANDOMIZATION.NEVER: variant = self.get_seed() is_correct = correct_map.is_correct(input_id) if is_correct is None: - is_correct = '' + is_correct = "" - response_data = getattr(answer_input, 'response_data', {}) + response_data = getattr(answer_input, "response_data", {}) input_metadata[input_id] = { - 'question': response_data.get('label', ''), - 'answer': user_visible_answer, - 'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''), - 'input_type': getattr(answer_input, 'tag', ''), - 'correct': is_correct, - 'variant': variant, - 'group_label': response_data.get('group_label', ''), + "question": response_data.get("label", ""), + "answer": user_visible_answer, + "response_type": getattr(getattr(answer_response, "xml", None), "tag", ""), + "input_type": getattr(answer_input, "tag", ""), + "correct": is_correct, + "variant": variant, + "group_label": response_data.get("group_label", ""), } return input_metadata @@ -2112,34 +2111,31 @@ def save_problem(self, data): The message is informative on success, and an error message on failure. """ event_info = {} - event_info['state'] = self.lcp.get_state() - event_info['problem_id'] = str(self.location) + event_info["state"] = self.lcp.get_state() + event_info["problem_id"] = str(self.location) answers = self.make_dict_of_responses(data) - event_info['answers'] = answers + event_info["answers"] = answers _ = self.runtime.service(self, "i18n").gettext # Too late. Cannot submit if self.closed() and not self.max_attempts == 0: - event_info['failure'] = 'closed' - self.publish_unmasked('save_problem_fail', event_info) + event_info["failure"] = "closed" + self.publish_unmasked("save_problem_fail", event_info) return { - 'success': False, + "success": False, # pylint: disable=line-too-long # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem. - 'msg': _("Problem is closed."), + "msg": _("Problem is closed."), # pylint: enable=line-too-long } # Problem submitted. Student should reset before saving # again. if self.done and self.rerandomize == RANDOMIZATION.ALWAYS: - event_info['failure'] = 'done' - self.publish_unmasked('save_problem_fail', event_info) - return { - 'success': False, - 'msg': _("Problem needs to be reset prior to save.") - } + event_info["failure"] = "done" + self.publish_unmasked("save_problem_fail", event_info) + return {"success": False, "msg": _("Problem needs to be reset prior to save.")} self.lcp.student_answers = answers self.lcp.has_saved_answers = True @@ -2147,17 +2143,13 @@ def save_problem(self, data): self.set_state_from_lcp() self.set_score(self.score_from_lcp(self.lcp)) - self.publish_unmasked('save_problem_success', event_info) + self.publish_unmasked("save_problem_success", event_info) msg = _("Your answers have been saved.") if not self.max_attempts == 0: - msg = _( - "Your answers have been saved but not graded. Click '{button_name}' to grade them." - ).format(button_name=self.submit_button_name()) - return { - 'success': True, - 'msg': msg, - 'html': self.get_problem_html(encapsulate=False) - } + msg = _("Your answers have been saved but not graded. Click '{button_name}' to grade them.").format( + button_name=self.submit_button_name() + ) + return {"success": True, "msg": msg, "html": self.get_problem_html(encapsulate=False)} def reset_problem(self, _data): """ @@ -2172,27 +2164,27 @@ def reset_problem(self, _data): `error` key containing an error message. 
""" event_info = {} - event_info['old_state'] = self.lcp.get_state() - event_info['problem_id'] = str(self.location) + event_info["old_state"] = self.lcp.get_state() + event_info["problem_id"] = str(self.location) _ = self.runtime.service(self, "i18n").gettext if self.closed(): - event_info['failure'] = 'closed' - self.publish_unmasked('reset_problem_fail', event_info) + event_info["failure"] = "closed" + self.publish_unmasked("reset_problem_fail", event_info) return { - 'success': False, + "success": False, # pylint: disable=line-too-long # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem. - 'msg': _("You cannot select Reset for a problem that is closed."), + "msg": _("You cannot select Reset for a problem that is closed."), # pylint: enable=line-too-long } if not self.is_submitted(): - event_info['failure'] = 'not_done' - self.publish_unmasked('reset_problem_fail', event_info) + event_info["failure"] = "not_done" + self.publish_unmasked("reset_problem_fail", event_info) return { - 'success': False, - 'msg': _("You must submit an answer before you can select Reset."), + "success": False, + "msg": _("You must submit an answer before you can select Reset."), } if self.is_submitted() and self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET]: @@ -2209,12 +2201,12 @@ def reset_problem(self, _data): # Grade may have changed, so publish new value self.publish_grade() - event_info['new_state'] = self.lcp.get_state() - self.publish_unmasked('reset_problem', event_info) + event_info["new_state"] = self.lcp.get_state() + self.publish_unmasked("reset_problem", event_info) return { - 'success': True, - 'html': self.get_problem_html(encapsulate=False), + "success": True, + "html": self.get_problem_html(encapsulate=False), } # ScorableXBlockMixin methods @@ -2238,38 +2230,42 @@ def rescore(self, only_if_higher=False): Returns the error messages for exceptions occurring while performing the rescoring, rather than throwing them. """ - event_info = {'state': self.lcp.get_state(), 'problem_id': str(self.location)} + event_info = {"state": self.lcp.get_state(), "problem_id": str(self.location)} _ = self.runtime.service(self, "i18n").gettext if not self.lcp.supports_rescoring(): - event_info['failure'] = 'unsupported' - self.publish_unmasked('problem_rescore_fail', event_info) + event_info["failure"] = "unsupported" + self.publish_unmasked("problem_rescore_fail", event_info) # pylint: disable=line-too-long # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score. 
raise NotImplementedError(_("Problem's definition does not support rescoring.")) # pylint: enable=line-too-long if not self.done: - event_info['failure'] = 'unanswered' - self.publish_unmasked('problem_rescore_fail', event_info) + event_info["failure"] = "unanswered" + self.publish_unmasked("problem_rescore_fail", event_info) raise NotFoundError(_("Problem must be answered before it can be graded again.")) # get old score, for comparison: orig_score = self.get_score() - event_info['orig_score'] = orig_score.raw_earned - event_info['orig_total'] = orig_score.raw_possible + event_info["orig_score"] = orig_score.raw_earned + event_info["orig_total"] = orig_score.raw_possible try: calculated_score = self.calculate_score() - except (StudentInputError, ResponseError, LoncapaProblemError) as inst: # lint-amnesty, pylint: disable=unused-variable + except ( + StudentInputError, + ResponseError, + LoncapaProblemError, + ) as inst: # lint-amnesty, pylint: disable=unused-variable log.warning("Input error in capa_block:problem_rescore", exc_info=True) - event_info['failure'] = 'input_error' - self.publish_unmasked('problem_rescore_fail', event_info) + event_info["failure"] = "input_error" + self.publish_unmasked("problem_rescore_fail", event_info) raise except Exception: - event_info['failure'] = 'unexpected' - self.publish_unmasked('problem_rescore_fail', event_info) + event_info["failure"] = "unexpected" + self.publish_unmasked("problem_rescore_fail", event_info) raise # rescoring should have no effect on attempts, so don't @@ -2277,21 +2273,21 @@ def rescore(self, only_if_higher=False): self.set_state_from_lcp() self.publish_grade(score=calculated_score, only_if_higher=only_if_higher) - event_info['new_score'] = calculated_score.raw_earned - event_info['new_total'] = calculated_score.raw_possible + event_info["new_score"] = calculated_score.raw_earned + event_info["new_total"] = calculated_score.raw_possible # success = correct if ALL questions in this problem are correct - success = 'correct' + success = "correct" for answer_id in self.lcp.correct_map: if not self.lcp.correct_map.is_correct(answer_id): - success = 'incorrect' + success = "incorrect" # NOTE: We are logging both full grading and queued-grading submissions. In the latter, # 'success' will always be incorrect - event_info['correct_map'] = self.lcp.correct_map.get_dict() - event_info['success'] = success - event_info['attempts'] = self.attempts - self.publish_unmasked('problem_rescore', event_info) + event_info["correct_map"] = self.lcp.correct_map.get_dict() + event_info["success"] = success + event_info["attempts"] = self.attempts + self.publish_unmasked("problem_rescore", event_info) def get_rescore_with_grading_method(self) -> Score: """ @@ -2339,7 +2335,7 @@ def update_correctness(self): """ # Make sure that the attempt number is always at least 1 for grading purposes, # even if the number of attempts have been reset and this problem is regraded. - self.lcp.context['attempt'] = max(self.attempts, 1) + self.lcp.context["attempt"] = max(self.attempts, 1) new_correct_map = self.lcp.get_grade_from_current_answers(None) self.lcp.correct_map.update(new_correct_map) @@ -2352,7 +2348,7 @@ def update_correctness_list(self): """ # Make sure that the attempt number is always at least 1 for grading purposes, # even if the number of attempts have been reset and this problem is regraded. 
- self.lcp.context['attempt'] = max(self.attempts, 1) + self.lcp.context["attempt"] = max(self.attempts, 1) new_correct_map_list = [] for student_answers, correct_map in zip(self.student_answers_history, self.correct_map_history): new_correct_map = self.lcp.get_grade_from_current_answers(student_answers, correct_map) @@ -2371,7 +2367,7 @@ def calculate_score(self): return self.get_rescore_with_grading_method() self.update_correctness() new_score = self.lcp.calculate_score() - return Score(raw_earned=new_score['score'], raw_possible=new_score['total']) + return Score(raw_earned=new_score["score"], raw_possible=new_score["total"]) def calculate_score_list(self): """ @@ -2381,7 +2377,7 @@ def calculate_score_list(self): for correct_map in self.lcp.correct_map_history: new_score = self.lcp.calculate_score(correct_map) - new_score_list.append(Score(raw_earned=new_score['score'], raw_possible=new_score['total'])) + new_score_list.append(Score(raw_earned=new_score["score"], raw_possible=new_score["total"])) return new_score_list def score_from_lcp(self, lcp): @@ -2390,7 +2386,7 @@ def score_from_lcp(self, lcp): currently stored by the LCP. """ lcp_score = lcp.calculate_score() - return Score(raw_earned=lcp_score['score'], raw_possible=lcp_score['total']) + return Score(raw_earned=lcp_score["score"], raw_possible=lcp_score["total"]) class GradingMethodHandler: @@ -2493,6 +2489,7 @@ class ComplexEncoder(json.JSONEncoder): """ Extend the JSON encoder to correctly handle complex numbers """ + def default(self, obj): # lint-amnesty, pylint: disable=arguments-differ, method-hidden """ Print a nicely formatted complex number, or default to the JSON encoder @@ -2510,15 +2507,12 @@ def randomization_bin(seed, problem_id): interesting. To avoid having sets of students that always get the same problems, we'll combine the system's per-student seed with the problem id in picking the bin. """ - r_hash = hashlib.sha1() + r_hash = hashlib.sha256() r_hash.update(str(seed).encode()) r_hash.update(str(problem_id).encode()) # get the first few digits of the hash, convert to an int, then mod. 
return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS -ProblemBlock = ( - _ExtractedProblemBlock if settings.USE_EXTRACTED_PROBLEM_BLOCK - else _BuiltInProblemBlock -) +ProblemBlock = _ExtractedProblemBlock if settings.USE_EXTRACTED_PROBLEM_BLOCK else _BuiltInProblemBlock ProblemBlock.__name__ = "ProblemBlock" diff --git a/xmodule/contentstore/django.py b/xmodule/contentstore/django.py index 2d375c70dd75..545794cc472e 100644 --- a/xmodule/contentstore/django.py +++ b/xmodule/contentstore/django.py @@ -14,18 +14,18 @@ def load_function(path): path is a string of the form "path.to.module.function" returns the imported python object `function` from `path.to.module` """ - module_path, _, name = path.rpartition('.') + module_path, _, name = path.rpartition(".") return getattr(import_module(module_path), name) -def contentstore(name='default'): # lint-amnesty, pylint: disable=missing-function-docstring +def contentstore(name="default"): # lint-amnesty, pylint: disable=missing-function-docstring if name not in _CONTENTSTORE: - class_ = load_function(settings.CONTENTSTORE['ENGINE']) + class_ = load_function(settings.CONTENTSTORE["ENGINE"]) options = {} - options.update(settings.CONTENTSTORE['DOC_STORE_CONFIG']) - if 'ADDITIONAL_OPTIONS' in settings.CONTENTSTORE: - if name in settings.CONTENTSTORE['ADDITIONAL_OPTIONS']: - options.update(settings.CONTENTSTORE['ADDITIONAL_OPTIONS'][name]) + options.update(settings.CONTENTSTORE["DOC_STORE_CONFIG"]) + if "ADDITIONAL_OPTIONS" in settings.CONTENTSTORE: + if name in settings.CONTENTSTORE["ADDITIONAL_OPTIONS"]: + options.update(settings.CONTENTSTORE["ADDITIONAL_OPTIONS"][name]) _CONTENTSTORE[name] = class_(**options) return _CONTENTSTORE[name] diff --git a/xmodule/fields.py b/xmodule/fields.py index 2e65304d4422..b5f997f035f4 100644 --- a/xmodule/fields.py +++ b/xmodule/fields.py @@ -14,9 +14,10 @@ class Date(JSONField): - ''' + """ Date fields know how to parse and produce json (iso) compatible formats. Converts to tz aware datetimes. 
- ''' + """ + # See note below about not defaulting these CURRENT_YEAR = datetime.datetime.now(UTC).year PREVENT_DEFAULT_DAY_MON_SEED1 = datetime.datetime(CURRENT_YEAR, 1, 1, tzinfo=UTC) @@ -53,15 +54,16 @@ def from_json(self, field): # lint-amnesty, pylint: disable=arguments-differ return None elif isinstance(field, str): return self._parse_date_wo_default_month_day(field) - elif isinstance(field, int) or isinstance(field, float): # lint-amnesty, pylint: disable=consider-merging-isinstance + elif isinstance(field, int) or isinstance( # lint-amnesty, pylint: disable=consider-merging-isinstance + field, float + ): return datetime.datetime.fromtimestamp(field / 1000, UTC) elif isinstance(field, time.struct_time): return datetime.datetime.fromtimestamp(time.mktime(field), UTC) elif isinstance(field, datetime.datetime): return field else: - msg = "Field {} has bad value '{}'".format( - self.name, field) + msg = "Field {} has bad value '{}'".format(self.name, field) raise TypeError(msg) def to_json(self, value): @@ -72,7 +74,7 @@ def to_json(self, value): return None if isinstance(value, time.struct_time): # struct_times are always utc - return time.strftime('%Y-%m-%dT%H:%M:%SZ', value) + return time.strftime("%Y-%m-%dT%H:%M:%SZ", value) elif isinstance(value, datetime.datetime): if value.tzinfo is None or value.utcoffset().total_seconds() == 0: if value.year < 1900: @@ -80,7 +82,7 @@ def to_json(self, value): # isoformat instead return value.isoformat() # isoformat adds +00:00 rather than Z - return value.strftime('%Y-%m-%dT%H:%M:%SZ') + return value.strftime("%Y-%m-%dT%H:%M:%SZ") else: return value.isoformat() else: @@ -88,7 +90,10 @@ def to_json(self, value): enforce_type = from_json -TIMEDELTA_REGEX = re.compile(r'^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$') # lint-amnesty, pylint: disable=line-too-long + +TIMEDELTA_REGEX = re.compile( + r"^((?P\d+?) day(?:s?))?(\s)?((?P\d+?) hour(?:s?))?(\s)?((?P\d+?) minute(?:s)?)?(\s)?((?P\d+?) second(?:s)?)?$" # lint-amnesty, pylint: disable=line-too-long +) class Timedelta(JSONField): # lint-amnesty, pylint: disable=missing-class-docstring @@ -116,7 +121,7 @@ def from_json(self, time_str): # lint-amnesty, pylint: disable=arguments-differ return parts = parts.groupdict() time_params = {} - for (name, param) in parts.items(): + for name, param in parts.items(): if param: time_params[name] = int(param) return datetime.timedelta(**time_params) @@ -126,11 +131,11 @@ def to_json(self, value): return None values = [] - for attr in ('days', 'hours', 'minutes', 'seconds'): + for attr in ("days", "hours", "minutes", "seconds"): cur_value = getattr(value, attr, 0) if cur_value > 0: values.append("%d %s" % (cur_value, attr)) - return ' '.join(values) + return " ".join(values) def enforce_type(self, value): """ @@ -161,6 +166,7 @@ class RelativeTime(JSONField): Python object of RelativeTime is datetime.timedelta. JSONed representation of RelativeTime is "HH:MM:SS" """ + # Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types MUTABLE = False @@ -173,17 +179,13 @@ def isotime_to_timedelta(cls, value): that max value that can be used by user is "23:59:59". """ try: - obj_time = time.strptime(value, '%H:%M:%S') + obj_time = time.strptime(value, "%H:%M:%S") except ValueError as e: raise ValueError( # lint-amnesty, pylint: disable=raise-missing-from "Incorrect RelativeTime value {!r} was set in XML or serialized. 
" "Original parse message is {}".format(value, str(e)) ) - return datetime.timedelta( - hours=obj_time.tm_hour, - minutes=obj_time.tm_min, - seconds=obj_time.tm_sec - ) + return datetime.timedelta(hours=obj_time.tm_hour, minutes=obj_time.tm_min, seconds=obj_time.tm_sec) def from_json(self, value): """ @@ -241,10 +243,10 @@ def timedelta_to_string(self, value): str(timedelta) has [H]H:MM:SS format, which is not suitable for front-end (and ISO time standard), so we force HH:MM:SS format. - """ + """ stringified = str(value) if len(stringified) == 7: - stringified = '0' + stringified + stringified = "0" + stringified return stringified def enforce_type(self, value): @@ -264,6 +266,7 @@ class ScoreField(JSONField): from their problem state, specifically for use in staff override of problem scores. """ + MUTABLE = False def from_json(self, value): @@ -272,17 +275,15 @@ def from_json(self, value): if isinstance(value, Score): return value - if set(value) != {'raw_earned', 'raw_possible'}: - raise TypeError('Scores must contain only a raw earned and raw possible value. Got {}'.format( - set(value) - )) + if set(value) != {"raw_earned", "raw_possible"}: + raise TypeError("Scores must contain only a raw earned and raw possible value. Got {}".format(set(value))) - raw_earned = value['raw_earned'] - raw_possible = value['raw_possible'] + raw_earned = value["raw_earned"] + raw_possible = value["raw_possible"] if raw_possible < 0: raise ValueError( - 'Error deserializing field of type {}: Expected a positive number for raw_possible, got {}.'.format( + "Error deserializing field of type {}: Expected a positive number for raw_possible, got {}.".format( self.display_name, raw_possible, ) @@ -290,10 +291,8 @@ def from_json(self, value): if not (0 <= raw_earned <= raw_possible): # lint-amnesty, pylint: disable=superfluous-parens raise ValueError( - 'Error deserializing field of type {}: Expected raw_earned between 0 and {}, got {}.'.format( - self.display_name, - raw_possible, - raw_earned + "Error deserializing field of type {}: Expected raw_earned between 0 and {}, got {}.".format( + self.display_name, raw_possible, raw_earned ) ) diff --git a/xmodule/progress.py b/xmodule/progress.py index dda88ad53eca..379dbf12a61d 100644 --- a/xmodule/progress.py +++ b/xmodule/progress.py @@ -1,16 +1,15 @@ -''' +""" Progress class for blocks. Represents where a student is in a block. For most subclassing needs, you should only need to reimplement frac() and __str__(). -''' - +""" import numbers class Progress: - '''Represents a progress of a/b (a out of b done) + """Represents a progress of a/b (a out of b done) a and b must be numeric, but not necessarily integer, with 0 <= a <= b and b > 0. @@ -19,17 +18,16 @@ class Progress: blocks (e.g. html) should return None from get_progress(). TODO: add tag for module type? Would allow for smarter merging. - ''' + """ def __init__(self, a, b): - '''Construct a Progress object. a and b must be numbers, and must have - 0 <= a <= b and b > 0 - ''' + """Construct a Progress object. a and b must be numbers, and must have + 0 <= a <= b and b > 0 + """ # Want to do all checking at construction time, so explicitly check types - if not (isinstance(a, numbers.Number) and - isinstance(b, numbers.Number)): - raise TypeError(f'a and b must be numbers. Passed {a}/{b}') + if not (isinstance(a, numbers.Number) and isinstance(b, numbers.Number)): + raise TypeError(f"a and b must be numbers. 
Passed {a}/{b}") if a > b: # lint-amnesty, pylint: disable=consider-using-min-builtin a = b @@ -38,56 +36,56 @@ def __init__(self, a, b): a = 0 if b <= 0: - raise ValueError(f'fraction a/b = {a}/{b} must have b > 0') + raise ValueError(f"fraction a/b = {a}/{b} must have b > 0") self._a = a self._b = b def frac(self): - ''' Return tuple (a,b) representing progress of a/b''' + """Return tuple (a,b) representing progress of a/b""" return (self._a, self._b) def percent(self): - ''' Returns a percentage progress as a float between 0 and 100. + """Returns a percentage progress as a float between 0 and 100. subclassing note: implemented in terms of frac(), assumes sanity checking is done at construction time. - ''' + """ (a, b) = self.frac() return 100.0 * a / b def started(self): - ''' Returns True if fractional progress is greater than 0. + """Returns True if fractional progress is greater than 0. subclassing note: implemented in terms of frac(), assumes sanity checking is done at construction time. - ''' + """ return self.frac()[0] > 0 def inprogress(self): - ''' Returns True if fractional progress is strictly between 0 and 1. + """Returns True if fractional progress is strictly between 0 and 1. subclassing note: implemented in terms of frac(), assumes sanity checking is done at construction time. - ''' + """ (a, b) = self.frac() return a > 0 and a < b # lint-amnesty, pylint: disable=chained-comparison def done(self): - ''' Return True if this represents done. + """Return True if this represents done. subclassing note: implemented in terms of frac(), assumes sanity checking is done at construction time. - ''' + """ (a, b) = self.frac() return a == b def ternary_str(self): - ''' Return a string version of this progress: either + """Return a string version of this progress: either "none", "in_progress", or "done". subclassing note: implemented in terms of frac() - ''' + """ (a, b) = self.frac() if a == 0: return "none" @@ -96,8 +94,8 @@ def ternary_str(self): return "done" def __eq__(self, other): - ''' Two Progress objects are equal if they have identical values. - Implemented in terms of frac()''' + """Two Progress objects are equal if they have identical values. + Implemented in terms of frac()""" if not isinstance(other, Progress): return False (a, b) = self.frac() @@ -105,26 +103,26 @@ def __eq__(self, other): return a == a2 and b == b2 def __ne__(self, other): - ''' The opposite of equal''' + """The opposite of equal""" return not self.__eq__(other) def __str__(self): - '''Return a string representation of this string. Rounds results to + """Return a string representation of this string. Rounds results to two decimal places, stripping out any trailing zeroes. subclassing note: implemented in terms of frac(). - ''' + """ (a, b) = self.frac() - display = lambda n: f'{n:.2f}'.rstrip('0').rstrip('.') + display = lambda n: f"{n:.2f}".rstrip("0").rstrip(".") return f"{display(a)}/{display(b)}" @staticmethod def add_counts(a, b): - '''Add two progress indicators, assuming that each represents items done: + """Add two progress indicators, assuming that each represents items done: (a / b) + (c / d) = (a + c) / (b + d). If either is None, returns the other. 
- ''' + """ if a is None: return b if b is None: diff --git a/xmodule/raw_block.py b/xmodule/raw_block.py index f24dd7712d85..703d84176092 100644 --- a/xmodule/raw_block.py +++ b/xmodule/raw_block.py @@ -9,20 +9,23 @@ log = logging.getLogger(__name__) -PRE_TAG_REGEX = re.compile(r']*>(?:(?=([^<]+))\1|<(?!pre\b[^>]*>))*?') +PRE_TAG_REGEX = re.compile(r"]*>(?:(?=([^<]+))\1|<(?!pre\b[^>]*>))*?") class RawMixin: """ Common code between RawDescriptor and XBlocks converted from XModules. """ + resources_dir = None data = String(help="XML data for the block", default="", scope=Scope.content) @classmethod - def definition_from_xml(cls, xml_object, system): # lint-amnesty, pylint: disable=missing-function-docstring, unused-argument - return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, [] + def definition_from_xml( + cls, xml_object, system + ): # lint-amnesty, pylint: disable=missing-function-docstring, unused-argument + return {"data": etree.tostring(xml_object, pretty_print=True, encoding="unicode")}, [] def definition_to_xml(self, resource_fs): # lint-amnesty, pylint: disable=unused-argument """ @@ -49,13 +52,10 @@ def definition_to_xml(self, resource_fs): # lint-amnesty, pylint: disable=unuse except etree.XMLSyntaxError as err: # Can't recover here, so just add some info and # re-raise - lines = self.data.split('\n') + lines = self.data.split("\n") line, offset = err.position - msg = ( - "Unable to create xml for block {loc}. " - "Context: '{context}'" - ).format( - context=lines[line - 1][offset - 40:offset + 40], + msg = ("Unable to create xml for block {loc}. " "Context: '{context}'").format( + context=lines[line - 1][offset - 40 : offset + 40], loc=self.location, ) raise SerializationError(self.location, msg) # lint-amnesty, pylint: disable=raise-missing-from @@ -87,15 +87,16 @@ class EmptyDataRawMixin: """ Common code between EmptyDataRawDescriptor and XBlocks converted from XModules. """ + resources_dir = None - data = String(default='', scope=Scope.content) + data = String(default="", scope=Scope.content) @classmethod def definition_from_xml(cls, xml_object, system): # lint-amnesty, pylint: disable=unused-argument if len(xml_object) == 0 and len(list(xml_object.items())) == 0: - return {'data': ''}, [] - return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, [] + return {"data": ""}, [] + return {"data": etree.tostring(xml_object, pretty_print=True, encoding="unicode")}, [] def definition_to_xml(self, resource_fs): # lint-amnesty, pylint: disable=unused-argument if self.data: diff --git a/xmodule/stringify.py b/xmodule/stringify.py index c97af7232338..8bc0e8da3b75 100644 --- a/xmodule/stringify.py +++ b/xmodule/stringify.py @@ -4,7 +4,7 @@ def stringify_children(node): - ''' + """ Return all contents of an xml tree, without the outside tags. e.g. if node is parse of "Hi
<div>there <span>Bruce!</span></div>
                    " @@ -13,7 +13,7 @@ def stringify_children(node): fixed from http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml - ''' + """ # Useful things to know: # node.tostring() -- generates xml for the node, including start @@ -24,7 +24,7 @@ def stringify_children(node): # next element. parts = [node.text] for c in node.getchildren(): - parts.append(etree.tostring(c, with_tail=True, encoding='unicode')) + parts.append(etree.tostring(c, with_tail=True, encoding="unicode")) # filter removes possible Nones in texts and tails - return ''.join([part for part in parts if part]) + return "".join([part for part in parts if part]) diff --git a/xmodule/tests/test_capa_block.py b/xmodule/tests/test_capa_block.py index 21582e55eac3..036f05349c10 100644 --- a/xmodule/tests/test_capa_block.py +++ b/xmodule/tests/test_capa_block.py @@ -1,6 +1,7 @@ """ Tests of the Capa XModule """ + # pylint: disable=invalid-name @@ -12,15 +13,14 @@ import unittest from unittest.mock import DEFAULT, Mock, PropertyMock, patch -import pytest import ddt +import pytest import requests import webob from codejail.safe_exec import SafeExecException from django.conf import settings from django.test import override_settings from django.utils.encoding import smart_str -from lms.djangoapps.courseware.user_state_client import XBlockUserState from lxml import etree from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from pytz import UTC @@ -30,21 +30,21 @@ from xblock.scorable import Score import xmodule +from lms.djangoapps.courseware.user_state_client import XBlockUserState from openedx.core.djangolib.testing.utils import skip_unless_lms from xmodule.capa import responsetypes from xmodule.capa.correctmap import CorrectMap from xmodule.capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError +from xmodule.capa.tests.test_util import use_unsafe_codejail from xmodule.capa.xqueue_interface import XQueueInterface from xmodule.capa_block import ComplexEncoder, ProblemBlock from xmodule.tests import DATA_DIR -from xmodule.capa.tests.test_util import use_unsafe_codejail from ..capa_block import RANDOMIZATION, SHOWANSWER from . import get_test_system - FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS = settings.FEATURES.copy() -FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS['ENABLE_GRADING_METHOD_IN_PROBLEMS'] = True +FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS["ENABLE_GRADING_METHOD_IN_PROBLEMS"] = True class CapaFactory: @@ -52,7 +52,8 @@ class CapaFactory: A helper class to create problem blocks with various parameters for testing. """ - sample_problem_xml = textwrap.dedent("""\ + sample_problem_xml = textwrap.dedent( + """\ @@ -62,7 +63,8 @@ class CapaFactory: - """) + """ + ) num = 0 @@ -83,12 +85,23 @@ def answer_key(cls, response_num=2, input_num=1): """ Return the key stored in the capa problem answer dict """ - return ("%s_%d_%d" % ("-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]), - response_num, input_num)) + return "%s_%d_%d" % ( + "-".join(["i4x", "edX", "capa_test", "problem", "SampleProblem%d" % cls.num]), + response_num, + input_num, + ) @classmethod - def create(cls, attempts=None, problem_state=None, correct=False, xml=None, override_get_score=True, - render_template=None, **kwargs): + def create( + cls, + attempts=None, + problem_state=None, + correct=False, + xml=None, + override_get_score=True, + render_template=None, + **kwargs, + ): """ All parameters are optional, and are added to the created problem if specified. 
@@ -114,24 +127,24 @@ def create(cls, attempts=None, problem_state=None, correct=False, xml=None, over
         )
         if xml is None:
             xml = cls.sample_problem_xml
-        field_data = {'data': xml}
+        field_data = {"data": xml}
         field_data.update(kwargs)
         if problem_state is not None:
             field_data.update(problem_state)
         if attempts is not None:
             # converting to int here because I keep putting "0" and "1" in the tests
             # since everything else is a string.
-            field_data['attempts'] = int(attempts)
+            field_data["attempts"] = int(attempts)

         system = get_test_system(
             course_id=location.course_key,
-            user_is_staff=kwargs.get('user_is_staff', False),
+            user_is_staff=kwargs.get("user_is_staff", False),
             render_template=render_template or Mock(return_value="Test Template HTML"),
         )
         block = ProblemBlock(
             system,
             DictFieldData(field_data),
-            ScopeIds(None, 'problem', location, location),
+            ScopeIds(None, "problem", location, location),
         )
         assert block.lcp
@@ -142,7 +155,7 @@ def create(cls, attempts=None, problem_state=None, correct=False, xml=None, over
         else:
             block.score = Score(raw_earned=0, raw_possible=1)

-        block.graded = 'False'
+        block.graded = "False"
         block.weight = 1
         return block

@@ -151,7 +164,9 @@ class CapaFactoryWithFiles(CapaFactory):
     """
    A factory for creating a Capa problem with files attached.
     """
-    sample_problem_xml = textwrap.dedent("""\
closed
-        block = CapaFactory.create(max_attempts="1", attempts="0",
-                                   due=self.yesterday_str)
+        block = CapaFactory.create(max_attempts="1", attempts="0", due=self.yesterday_str)
         assert block.closed()

-    @patch.object(ProblemBlock, 'course_end_date', new_callable=PropertyMock)
+    @patch.object(ProblemBlock, "course_end_date", new_callable=PropertyMock)
     def test_closed_for_archive(self, mock_course_end_date):

         # Utility to create a datetime object in the past
         def past_datetime(days):
-            return (datetime.datetime.now(UTC) - datetime.timedelta(days=days))
+            return datetime.datetime.now(UTC) - datetime.timedelta(days=days)

         # Utility to create a datetime object in the future
         def future_datetime(days):
-            return (datetime.datetime.now(UTC) + datetime.timedelta(days=days))
+            return datetime.datetime.now(UTC) + datetime.timedelta(days=days)

         block = CapaFactory.create(max_attempts="1", attempts="0")
@@ -691,14 +755,16 @@ def test_parse_get_params(self):

         # Valid GET param dict
         # 'input_5' intentionally left unset,
-        valid_get_dict = MultiDict({
-            'input_1': 'test',
-            'input_1_2': 'test',
-            'input_1_2_3': 'test',
-            'input_[]_3': 'test',
-            'input_4': None,
-            'input_6': 5
-        })
+        valid_get_dict = MultiDict(
+            {
+                "input_1": "test",
+                "input_1_2": "test",
+                "input_1_2_3": "test",
+                "input_[]_3": "test",
+                "input_4": None,
+                "input_6": 5,
+            }
+        )

         result = ProblemBlock.make_dict_of_responses(valid_get_dict)

@@ -706,32 +772,31 @@ def test_parse_get_params(self):
         # and that we get the same values back
         for key in result.keys():  # lint-amnesty, pylint: disable=consider-iterating-dictionary
             original_key = "input_" + key
-            assert original_key in valid_get_dict, ('Output dict should have key %s' % original_key)
+            assert original_key in valid_get_dict, "Output dict should have key %s" % original_key
             assert valid_get_dict[original_key] == result[key]

         # Valid GET param dict with list keys
         # Each tuple represents a single parameter in the query string
-        valid_get_dict = MultiDict((('input_2[]', 'test1'), ('input_2[]', 'test2')))
+        valid_get_dict = MultiDict((("input_2[]", "test1"), ("input_2[]", "test2")))
         result = ProblemBlock.make_dict_of_responses(valid_get_dict)
-        assert '2' in result
-        assert ['test1', 'test2'] == result['2']
+        assert "2" in result
+        assert ["test1", "test2"] == result["2"]

         # If we use [] at the end of a key name, we should always
         # get a list, even if there's just one value
-        valid_get_dict = MultiDict({'input_1[]': 'test'})
+        valid_get_dict = MultiDict({"input_1[]": "test"})
         result = ProblemBlock.make_dict_of_responses(valid_get_dict)
-        assert result['1'] == ['test']
+        assert result["1"] == ["test"]

         # If we have no underscores in the name, then the key is invalid
-        invalid_get_dict = MultiDict({'input': 'test'})
+        invalid_get_dict = MultiDict({"input": "test"})
         with pytest.raises(ValueError):
             result = ProblemBlock.make_dict_of_responses(invalid_get_dict)

         # Two equivalent names (one list, one non-list)
         # One of the values would overwrite the other, so detect this
         # and raise an exception
-        invalid_get_dict = MultiDict({'input_1[]': 'test 1',
-                                      'input_1': 'test 2'})
+        invalid_get_dict = MultiDict({"input_1[]": "test 1", "input_1": "test 2"})
         with pytest.raises(ValueError):
             result = ProblemBlock.make_dict_of_responses(invalid_get_dict)

@@ -742,29 +807,29 @@ def test_submit_problem_correct(self):
         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching CorrectMap.is_correct()
         # Also simulate rendering the HTML
-        with patch('xmodule.capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
-            with patch('xmodule.capa_block.ProblemBlock.get_problem_html') as mock_html:
+        with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct:
+            with patch("xmodule.capa_block.ProblemBlock.get_problem_html") as mock_html:
                 mock_is_correct.return_value = True
                 mock_html.return_value = "Test HTML"

                 # Check the problem
-                get_request_dict = {CapaFactory.input_key(): '3.14'}
+                get_request_dict = {CapaFactory.input_key(): "3.14"}
                 result = block.submit_problem(get_request_dict)

         # Expect that the problem is marked correct
-        assert result['success'] == 'correct'
+        assert result["success"] == "correct"

         # Expect that we get the (mocked) HTML
-        assert result['contents'] == 'Test HTML'
+        assert result["contents"] == "Test HTML"

         # Expect that the number of attempts is incremented by 1
         assert block.attempts == 2
         # and that this was considered attempt number 2 for grading purposes
-        assert block.lcp.context['attempt'] == 2
+        assert block.lcp.context["attempt"] == 2

-    @patch('xmodule.capa_block.ProblemBlock.get_score_with_grading_method')
-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
+    @patch("xmodule.capa_block.ProblemBlock.get_score_with_grading_method")
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
     def test_submit_problem_with_grading_method_disable(
         self, mock_html: Mock, mock_is_correct: Mock, mock_get_score: Mock
     ):
@@ -778,43 +843,41 @@ def test_submit_problem_with_grading_method_disable(
         # First Attempt
         mock_is_correct.return_value = True
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 1
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1
         assert block.score == Score(raw_earned=1, raw_possible=1)
         mock_get_score.assert_not_called()

         # Second Attempt
         mock_is_correct.return_value = False
-        get_request_dict = {CapaFactory.input_key(): '3.50'}
+        get_request_dict = {CapaFactory.input_key(): "3.50"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 2
-        assert block.lcp.context['attempt'] == 2
+        assert block.lcp.context["attempt"] == 2
         assert block.score == Score(raw_earned=0, raw_possible=1)
         mock_get_score.assert_not_called()

         # Third Attempt
         mock_is_correct.return_value = True
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 3
-        assert block.lcp.context['attempt'] == 3
+        assert block.lcp.context["attempt"] == 3
         assert block.score == Score(raw_earned=1, raw_possible=1)
         mock_get_score.assert_not_called()
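The `test_parse_get_params` hunks above pin down the parsing contract of `ProblemBlock.make_dict_of_responses`. The following is a hypothetical sketch of that contract, not the real implementation (the helper name is invented):

```python
from webob.multidict import MultiDict


def make_dict_of_responses_sketch(data: MultiDict) -> dict:
    """Sketch of the rules the tests above assert; not the real implementation."""
    answers = {}
    for key in set(data.keys()):
        # Keys must look like "input_<name>"; anything without an underscore is invalid.
        if "_" not in key:
            raise ValueError(f"invalid key: {key}")
        name = key.split("_", 1)[1]
        if name.endswith("[]"):
            # A "[]" suffix always yields a list, even for a single value.
            name = name[:-2]
            value = data.getall(key)
        else:
            value = data[key]
        if name in answers:
            # e.g. "input_1[]" and "input_1" together would clobber each other.
            raise ValueError(f"duplicate key: {name}")
        answers[name] = value
    return answers


assert make_dict_of_responses_sketch(MultiDict({"input_1[]": "test"})) == {"1": ["test"]}
```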
     @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
-    def test_submit_problem_with_grading_method_enable(
-        self, mock_html: Mock, mock_is_correct: Mock
-    ):
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
+    def test_submit_problem_with_grading_method_enable(self, mock_html: Mock, mock_is_correct: Mock):
         """
         Test that the grading method is enabled when submitting a problem.
         Then, the `get_score_with_grading_method` method should be called.
@@ -824,21 +887,19 @@ def test_submit_problem_with_grading_method_enable(
         mock_is_correct.return_value = True

         with patch.object(
-            ProblemBlock, 'get_score_with_grading_method', wraps=block.get_score_with_grading_method
+            ProblemBlock, "get_score_with_grading_method", wraps=block.get_score_with_grading_method
         ) as mock_get_score:
-            get_request_dict = {CapaFactory.input_key(): '3.14'}
+            get_request_dict = {CapaFactory.input_key(): "3.14"}
             block.submit_problem(get_request_dict)

             assert block.attempts == 1
-            assert block.lcp.context['attempt'] == 1
+            assert block.lcp.context["attempt"] == 1
             assert block.score == Score(raw_earned=1, raw_possible=1)
             mock_get_score.assert_called()

-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
-    def test_submit_problem_grading_method_disable_to_enable(
-        self, mock_html: Mock, mock_is_correct: Mock
-    ):
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
+    def test_submit_problem_grading_method_disable_to_enable(self, mock_html: Mock, mock_is_correct: Mock):
         """
         Test when the grading method is disabled and then enabled.
@@ -850,126 +911,116 @@ def test_submit_problem_grading_method_disable_to_enable(
         # Disabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=False
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=False
         ):
             # First Attempt
             mock_is_correct.return_value = True
-            get_request_dict = {CapaFactory.input_key(): '3.14'}
+            get_request_dict = {CapaFactory.input_key(): "3.14"}
             block.submit_problem(get_request_dict)
             assert block.attempts == 1
-            assert block.lcp.context['attempt'] == 1
+            assert block.lcp.context["attempt"] == 1
             assert block.score == Score(raw_earned=1, raw_possible=1)

             # Second Attempt
             mock_is_correct.return_value = False
-            get_request_dict = {CapaFactory.input_key(): '3.50'}
+            get_request_dict = {CapaFactory.input_key(): "3.50"}
             block.submit_problem(get_request_dict)
             assert block.attempts == 2
-            assert block.lcp.context['attempt'] == 2
+            assert block.lcp.context["attempt"] == 2
             assert block.score == Score(raw_earned=0, raw_possible=1)

         # Enabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=True
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=True
         ):
             # Third Attempt
             mock_is_correct.return_value = False
-            get_request_dict = {CapaFactory.input_key(): '3.96'}
+            get_request_dict = {CapaFactory.input_key(): "3.96"}
             block.submit_problem(get_request_dict)
             assert block.attempts == 3
-            assert block.lcp.context['attempt'] == 3
+            assert block.lcp.context["attempt"] == 3
             assert block.score == Score(raw_earned=0, raw_possible=1)

             # Fourth Attempt
-            block.grading_method = 'highest_score'
+            block.grading_method = "highest_score"
             mock_is_correct.return_value = False
-            get_request_dict = {CapaFactory.input_key(): '3.99'}
+            get_request_dict = {CapaFactory.input_key(): "3.99"}
             block.submit_problem(get_request_dict)
             assert block.attempts == 4
-            assert block.lcp.context['attempt'] == 4
+            assert block.lcp.context["attempt"] == 4
             assert block.score == Score(raw_earned=1, raw_possible=1)

-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
-    def test_submit_problem_grading_method_enable_to_disable(
-        self, mock_html: Mock, mock_is_correct: Mock
-    ):
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
+    def test_submit_problem_grading_method_enable_to_disable(self, mock_html: Mock, mock_is_correct: Mock):
         """
         Test when the grading method is enabled and then disabled.
         When the grading method is enabled, the final score is calculated
         according to the grading method. When the grading method is disabled,
         the final score is always the last attempt.
""" - block = CapaFactory.create(attempts=0, max_attempts=4, grading_method='highest_score') + block = CapaFactory.create(attempts=0, max_attempts=4, grading_method="highest_score") mock_html.return_value = "Test HTML" # Enabled grading method with patch( - 'xmodule.capa_block.ProblemBlock.is_grading_method_enabled', - new_callable=PropertyMock, - return_value=True + "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=True ): # First Attempt mock_is_correct.return_value = True - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) assert block.attempts == 1 - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 assert block.score == Score(raw_earned=1, raw_possible=1) # Second Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '3.50'} + get_request_dict = {CapaFactory.input_key(): "3.50"} block.submit_problem(get_request_dict) assert block.attempts == 2 - assert block.lcp.context['attempt'] == 2 + assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=1, raw_possible=1) # Disabled grading method with patch( - 'xmodule.capa_block.ProblemBlock.is_grading_method_enabled', - new_callable=PropertyMock, - return_value=False + "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=False ): # Third Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '3.96'} + get_request_dict = {CapaFactory.input_key(): "3.96"} block.submit_problem(get_request_dict) assert block.attempts == 3 - assert block.lcp.context['attempt'] == 3 + assert block.lcp.context["attempt"] == 3 assert block.score == Score(raw_earned=0, raw_possible=1) # Fourth Attempt mock_is_correct.return_value = True - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) assert block.attempts == 4 - assert block.lcp.context['attempt'] == 4 + assert block.lcp.context["attempt"] == 4 assert block.score == Score(raw_earned=1, raw_possible=1) @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS) - @patch('xmodule.capa.correctmap.CorrectMap.is_correct') - @patch('xmodule.capa_block.ProblemBlock.get_problem_html') + @patch("xmodule.capa.correctmap.CorrectMap.is_correct") + @patch("xmodule.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_last_score(self, mock_html: Mock, mock_is_correct: Mock): """ Test the `last_score` grading method. 
@@ -983,27 +1034,27 @@ def test_submit_problem_correct_last_score(self, mock_html: Mock, mock_is_correc
         # First Attempt
         mock_is_correct.return_value = True
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 1
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1
         assert block.score == Score(raw_earned=1, raw_possible=1)

         # Second Attempt
         mock_is_correct.return_value = False
-        get_request_dict = {CapaFactory.input_key(): '3.54'}
+        get_request_dict = {CapaFactory.input_key(): "3.54"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 2
-        assert block.lcp.context['attempt'] == 2
+        assert block.lcp.context["attempt"] == 2
         assert block.score == Score(raw_earned=0, raw_possible=1)

     @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
     def test_submit_problem_correct_highest_score(self, mock_html: Mock, mock_is_correct: Mock):
         """
         Test the `highest_score` grading method.
@@ -1011,32 +1062,32 @@ def test_submit_problem_correct_highest_score(self, mock_html: Mock, mock_is_cor
         When the grading method is `highest_score`, the final score is the
         highest score among all attempts.
         """
-        block = CapaFactory.create(attempts=0, max_attempts=2, grading_method='highest_score')
+        block = CapaFactory.create(attempts=0, max_attempts=2, grading_method="highest_score")
         mock_html.return_value = "Test HTML"

         # First Attempt
         mock_is_correct.return_value = True
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 1
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1
         assert block.score == Score(raw_earned=1, raw_possible=1)

         # Second Attempt
         mock_is_correct.return_value = False
-        get_request_dict = {CapaFactory.input_key(): '3.54'}
+        get_request_dict = {CapaFactory.input_key(): "3.54"}
         block.submit_problem(get_request_dict)
         assert block.attempts == 2
-        assert block.lcp.context['attempt'] == 2
+        assert block.lcp.context["attempt"] == 2
         assert block.score == Score(raw_earned=1, raw_possible=1)

     @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
-    @patch('xmodule.capa.correctmap.CorrectMap.is_correct')
-    @patch('xmodule.capa_block.ProblemBlock.get_problem_html')
+    @patch("xmodule.capa.correctmap.CorrectMap.is_correct")
+    @patch("xmodule.capa_block.ProblemBlock.get_problem_html")
     def test_submit_problem_correct_first_score(self, mock_html: Mock, mock_is_correct: Mock):
         """
         Test the `first_score` grading method.
@@ -1044,32 +1095,32 @@ def test_submit_problem_correct_first_score(self, mock_html: Mock, mock_is_corre
         When the grading method is `first_score`, the final score is the
         first score among all attempts.
""" - block = CapaFactory.create(attempts=0, max_attempts=2, grading_method='first_score') + block = CapaFactory.create(attempts=0, max_attempts=2, grading_method="first_score") mock_html.return_value = "Test HTML" # First Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) assert block.attempts == 1 - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 assert block.score == Score(raw_earned=0, raw_possible=1) # Second Attempt mock_is_correct.return_value = True - get_request_dict = {CapaFactory.input_key(): '3.54'} + get_request_dict = {CapaFactory.input_key(): "3.54"} block.submit_problem(get_request_dict) assert block.attempts == 2 - assert block.lcp.context['attempt'] == 2 + assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=0, raw_possible=1) @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS) - @patch('xmodule.capa.correctmap.CorrectMap.is_correct') - @patch('xmodule.capa_block.ProblemBlock.get_problem_html') + @patch("xmodule.capa.correctmap.CorrectMap.is_correct") + @patch("xmodule.capa_block.ProblemBlock.get_problem_html") def test_submit_problem_correct_average_score(self, mock_html: Mock, mock_is_correct: Mock): """ Test the `average_score` grading method. @@ -1077,47 +1128,47 @@ def test_submit_problem_correct_average_score(self, mock_html: Mock, mock_is_cor When the grading method is `average_score`, the final score is the average score among all attempts. """ - block = CapaFactory.create(attempts=0, max_attempts=4, grading_method='average_score') + block = CapaFactory.create(attempts=0, max_attempts=4, grading_method="average_score") mock_html.return_value = "Test HTML" # First Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) assert block.attempts == 1 - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 assert block.score == Score(raw_earned=0, raw_possible=1) # Second Attempt mock_is_correct.return_value = True - get_request_dict = {CapaFactory.input_key(): '3.54'} + get_request_dict = {CapaFactory.input_key(): "3.54"} block.submit_problem(get_request_dict) assert block.attempts == 2 - assert block.lcp.context['attempt'] == 2 + assert block.lcp.context["attempt"] == 2 assert block.score == Score(raw_earned=0.5, raw_possible=1) # Third Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '3.45'} + get_request_dict = {CapaFactory.input_key(): "3.45"} block.submit_problem(get_request_dict) assert block.attempts == 3 - assert block.lcp.context['attempt'] == 3 + assert block.lcp.context["attempt"] == 3 assert block.score == Score(raw_earned=0.33, raw_possible=1) # Fourth Attempt mock_is_correct.return_value = False - get_request_dict = {CapaFactory.input_key(): '41.3'} + get_request_dict = {CapaFactory.input_key(): "41.3"} block.submit_problem(get_request_dict) assert block.attempts == 4 - assert block.lcp.context['attempt'] == 4 + assert block.lcp.context["attempt"] == 4 assert block.score == Score(raw_earned=0.25, raw_possible=1) def test_submit_problem_incorrect(self): @@ -1125,39 +1176,36 @@ def test_submit_problem_incorrect(self): block = CapaFactory.create(attempts=0) # Simulate marking the input incorrect - with 
patch('xmodule.capa.correctmap.CorrectMap.is_correct') as mock_is_correct: + with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct: mock_is_correct.return_value = False # Check the problem - get_request_dict = {CapaFactory.input_key(): '0'} + get_request_dict = {CapaFactory.input_key(): "0"} result = block.submit_problem(get_request_dict) # Expect that the problem is marked correct - assert result['success'] == 'incorrect' + assert result["success"] == "incorrect" # Expect that the number of attempts is incremented by 1 assert block.attempts == 1 # and that this is considered the first attempt - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 def test_submit_problem_closed(self): block = CapaFactory.create(attempts=3) # Problem closed -- cannot submit # Simulate that ProblemBlock.closed() always returns True - with patch('xmodule.capa_block.ProblemBlock.closed') as mock_closed: + with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed: mock_closed.return_value = True with pytest.raises(xmodule.exceptions.NotFoundError): - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) # Expect that number of attempts NOT incremented assert block.attempts == 3 - @ddt.data( - RANDOMIZATION.ALWAYS, - 'true' - ) + @ddt.data(RANDOMIZATION.ALWAYS, "true") def test_submit_problem_resubmitted_with_randomize(self, rerandomize): # Randomize turned on block = CapaFactory.create(rerandomize=rerandomize, attempts=0) @@ -1167,55 +1215,49 @@ def test_submit_problem_resubmitted_with_randomize(self, rerandomize): # Expect that we cannot submit with pytest.raises(xmodule.exceptions.NotFoundError): - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) # Expect that number of attempts NOT incremented assert block.attempts == 0 - @ddt.data( - RANDOMIZATION.NEVER, - 'false', - RANDOMIZATION.PER_STUDENT - ) + @ddt.data(RANDOMIZATION.NEVER, "false", RANDOMIZATION.PER_STUDENT) def test_submit_problem_resubmitted_no_randomize(self, rerandomize): # Randomize turned off block = CapaFactory.create(rerandomize=rerandomize, attempts=0, done=True) # Expect that we can submit successfully - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.submit_problem(get_request_dict) - assert result['success'] == 'correct' + assert result["success"] == "correct" # Expect that number of attempts IS incremented, still same attempt assert block.attempts == 1 - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 def test_submit_problem_queued(self): block = CapaFactory.create(attempts=1) # Simulate that the problem is queued multipatch = patch.multiple( - 'xmodule.capa.capa_problem.LoncapaProblem', - is_queued=DEFAULT, - get_recentmost_queuetime=DEFAULT + "xmodule.capa.capa_problem.LoncapaProblem", is_queued=DEFAULT, get_recentmost_queuetime=DEFAULT ) with multipatch as values: - values['is_queued'].return_value = True - values['get_recentmost_queuetime'].return_value = datetime.datetime.now(UTC) + values["is_queued"].return_value = True + values["get_recentmost_queuetime"].return_value = datetime.datetime.now(UTC) - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.submit_problem(get_request_dict) # Expect an AJAX 
alert message in 'success' - assert 'You must wait' in result['success'] + assert "You must wait" in result["success"] # Expect that the number of attempts is NOT incremented assert block.attempts == 1 @pytest.mark.django_db - @patch.object(XQueueInterface, '_http_post') + @patch.object(XQueueInterface, "_http_post") def test_submit_problem_with_files(self, mock_xqueue_post): # Check a problem with uploaded files, using the submit_problem API. # pylint: disable=protected-access @@ -1235,7 +1277,7 @@ def test_submit_problem_with_files(self, mock_xqueue_post): # Create a request dictionary for submit_problem. get_request_dict = { CapaFactoryWithFiles.input_key(response_num=2): fileobjs, - CapaFactoryWithFiles.input_key(response_num=3): 'None', + CapaFactoryWithFiles.input_key(response_num=3): "None", } block.submit_problem(get_request_dict) @@ -1261,12 +1303,12 @@ def test_submit_problem_with_files(self, mock_xqueue_post): assert mock_xqueue_post.call_count == 1 _, kwargs = mock_xqueue_post.call_args - self.assertCountEqual(fpaths, list(kwargs['files'].keys())) - for fpath, fileobj in kwargs['files'].items(): + self.assertCountEqual(fpaths, list(kwargs["files"].keys())) + for fpath, fileobj in kwargs["files"].items(): assert fpath == fileobj.name @pytest.mark.django_db - @patch.object(XQueueInterface, '_http_post') + @patch.object(XQueueInterface, "_http_post") def test_submit_problem_with_files_as_xblock(self, mock_xqueue_post): # Check a problem with uploaded files, using the XBlock API. # pylint: disable=protected-access @@ -1287,78 +1329,75 @@ def test_submit_problem_with_files_as_xblock(self, mock_xqueue_post): post_data = [] for fname, fileobj in zip(fnames, fileobjs): post_data.append((CapaFactoryWithFiles.input_key(response_num=2), (fname, fileobj))) - post_data.append((CapaFactoryWithFiles.input_key(response_num=3), 'None')) - request = webob.Request.blank("/some/fake/url", POST=post_data, content_type='multipart/form-data') + post_data.append((CapaFactoryWithFiles.input_key(response_num=3), "None")) + request = webob.Request.blank("/some/fake/url", POST=post_data, content_type="multipart/form-data") - block.handle('xmodule_handler', request, 'problem_check') + block.handle("xmodule_handler", request, "problem_check") assert mock_xqueue_post.call_count == 1 _, kwargs = mock_xqueue_post.call_args - self.assertCountEqual(fnames, list(kwargs['files'].keys())) - for fpath, fileobj in kwargs['files'].items(): + self.assertCountEqual(fnames, list(kwargs["files"].keys())) + for fpath, fileobj in kwargs["files"].items(): assert fpath == fileobj.name def test_submit_problem_error(self): # Try each exception that capa_block should handle - exception_classes = [StudentInputError, - LoncapaProblemError, - ResponseError] + exception_classes = [StudentInputError, LoncapaProblemError, ResponseError] for exception_class in exception_classes: # Create the block block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate answering a problem that raises the exception - with patch('xmodule.capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: - mock_grade.side_effect = exception_class('test error') + with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: + mock_grade.side_effect = exception_class("test error") - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' - expected_msg = 'test error' + expected_msg 
= "test error" - assert expected_msg == result['success'] + assert expected_msg == result["success"] # Expect that the number of attempts is NOT incremented assert block.attempts == 1 # but that this was considered attempt number 2 for grading purposes - assert block.lcp.context['attempt'] == 2 + assert block.lcp.context["attempt"] == 2 def test_submit_problem_error_with_codejail_exception(self): # Try each exception that capa_block should handle - exception_classes = [StudentInputError, - LoncapaProblemError, - ResponseError] + exception_classes = [StudentInputError, LoncapaProblemError, ResponseError] for exception_class in exception_classes: # Create the block block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate a codejail exception "Exception: Couldn't execute jailed code" - with patch('xmodule.capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: + with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: try: raise ResponseError( - 'Couldn\'t execute jailed code: stdout: \'\', ' - 'stderr: \'Traceback (most recent call last):\\n' + "Couldn't execute jailed code: stdout: '', " + "stderr: 'Traceback (most recent call last):\\n" ' File "jailed_code", line 15, in \\n' ' exec code in g_dict\\n File "", line 67, in \\n' ' File "", line 65, in check_func\\n' - 'Exception: Couldn\'t execute jailed code\\n\' with status code: 1', ) + "Exception: Couldn't execute jailed code\\n' with status code: 1", + ) except ResponseError as err: mock_grade.side_effect = exception_class(str(err)) - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' without the text of the stack trace - expected_msg = 'Couldn\'t execute jailed code' - assert expected_msg == result['success'] + expected_msg = "Couldn't execute jailed code" + assert expected_msg == result["success"] # Expect that the number of attempts is NOT incremented assert block.attempts == 1 # but that this was considered the second attempt for grading purposes - assert block.lcp.context['attempt'] == 2 + assert block.lcp.context["attempt"] == 2 @override_settings(DEBUG=True) def test_submit_problem_other_errors(self): @@ -1371,15 +1410,15 @@ def test_submit_problem_other_errors(self): block = CapaFactory.create(attempts=1, user_is_staff=False) # Simulate answering a problem that raises the exception - with patch('xmodule.capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade: + with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade: error_msg = "Superterrible error happened: ☠" mock_grade.side_effect = Exception(error_msg) - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.submit_problem(get_request_dict) # Expect an AJAX alert message in 'success' - assert error_msg in result['success'] + assert error_msg in result["success"] def test_submit_problem_zero_max_grade(self): """ @@ -1389,96 +1428,90 @@ def test_submit_problem_zero_max_grade(self): block = CapaFactory.create(attempts=1) # Override the problem score to have a total of zero. 
-        block.lcp.get_score = lambda: {'score': 0, 'total': 0}
+        block.lcp.get_score = lambda: {"score": 0, "total": 0}

         # Check the problem
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)

     def test_submit_problem_error_nonascii(self):

         # Try each exception that capa_block should handle
-        exception_classes = [StudentInputError,
-                             LoncapaProblemError,
-                             ResponseError]
+        exception_classes = [StudentInputError, LoncapaProblemError, ResponseError]
         for exception_class in exception_classes:

             # Create the block
             block = CapaFactory.create(attempts=1, user_is_staff=False)

             # Simulate answering a problem that raises the exception
-            with patch('xmodule.capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
+            with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade:
                 mock_grade.side_effect = exception_class("ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ")

-                get_request_dict = {CapaFactory.input_key(): '3.14'}
+                get_request_dict = {CapaFactory.input_key(): "3.14"}
                 result = block.submit_problem(get_request_dict)

             # Expect an AJAX alert message in 'success'
-            expected_msg = 'ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ'
+            expected_msg = "ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ"

-            assert expected_msg == result['success']
+            assert expected_msg == result["success"]

             # Expect that the number of attempts is NOT incremented
             assert block.attempts == 1
             # but that this was considered the second attempt for grading purposes
-            assert block.lcp.context['attempt'] == 2
+            assert block.lcp.context["attempt"] == 2

     def test_submit_problem_error_with_staff_user(self):

         # Try each exception that capa block should handle
-        for exception_class in [StudentInputError,
-                                LoncapaProblemError,
-                                ResponseError]:
+        for exception_class in [StudentInputError, LoncapaProblemError, ResponseError]:

             # Create the block
             block = CapaFactory.create(attempts=1, user_is_staff=True)

             # Simulate answering a problem that raises an exception
-            with patch('xmodule.capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
-                mock_grade.side_effect = exception_class('test error')
+            with patch("xmodule.capa.capa_problem.LoncapaProblem.grade_answers") as mock_grade:
+                mock_grade.side_effect = exception_class("test error")

-                get_request_dict = {CapaFactory.input_key(): '3.14'}
+                get_request_dict = {CapaFactory.input_key(): "3.14"}
                 result = block.submit_problem(get_request_dict)

             # Expect an AJAX alert message in 'success'
-            assert 'test error' in result['success']
+            assert "test error" in result["success"]

             # We DO include traceback information for staff users
-            assert 'Traceback' in result['success']
+            assert "Traceback" in result["success"]

             # Expect that the number of attempts is NOT incremented
             assert block.attempts == 1
             # but that it was considered the second attempt for grading purposes
-            assert block.lcp.context['attempt'] == 2
+            assert block.lcp.context["attempt"] == 2

     @ddt.data(
-        ("never", True, None, 'submitted'),
-        ("never", False, None, 'submitted'),
-        ("past_due", True, None, 'submitted'),
-        ("past_due", False, None, 'submitted'),
-        ("always", True, 1, 'correct'),
-        ("always", False, 0, 'incorrect'),
+        ("never", True, None, "submitted"),
+        ("never", False, None, "submitted"),
+        ("past_due", True, None, "submitted"),
+        ("past_due", False, None, "submitted"),
+        ("always", True, 1, "correct"),
+        ("always", False, 0, "incorrect"),
     )
     @ddt.unpack
     def test_handle_ajax_show_correctness(self, show_correctness, is_correct, expected_score, expected_success):
-        block = CapaFactory.create(show_correctness=show_correctness,
-                                   due=self.tomorrow_str,
-                                   correct=is_correct)
+        block = CapaFactory.create(show_correctness=show_correctness, due=self.tomorrow_str, correct=is_correct)

         # Simulate marking the input correct/incorrect
-        with patch('xmodule.capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
+        with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct:
             mock_is_correct.return_value = is_correct

             # Check the problem
-            get_request_dict = {CapaFactory.input_key(): '0'}
-            json_result = block.handle_ajax('problem_check', get_request_dict)
+            get_request_dict = {CapaFactory.input_key(): "0"}
+            json_result = block.handle_ajax("problem_check", get_request_dict)
             result = json.loads(json_result)

         # Expect that the AJAX result withholds correctness and score
-        assert result['current_score'] == expected_score
-        assert result['success'] == expected_success
+        assert result["current_score"] == expected_score
+        assert result["success"] == expected_success

         # Expect that the number of attempts is incremented by 1
         assert block.attempts == 1
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1

     def test_reset_problem(self):
         block = CapaFactory.create(done=True)
@@ -1486,7 +1519,7 @@ def test_reset_problem(self):
         block.choose_new_seed = Mock(wraps=block.choose_new_seed)

         # Stub out HTML rendering
-        with patch('xmodule.capa_block.ProblemBlock.get_problem_html') as mock_html:
+        with patch("xmodule.capa_block.ProblemBlock.get_problem_html") as mock_html:
             mock_html.return_value = "Test HTML"

             # Reset the problem
@@ -1494,11 +1527,11 @@ def test_reset_problem(self):
             result = block.reset_problem(get_request_dict)

         # Expect that the request was successful
-        assert (('success' in result) and result['success'])
+        assert ("success" in result) and result["success"]

         # Expect that the problem HTML is retrieved
-        assert 'html' in result
-        assert result['html'] == 'Test HTML'
+        assert "html" in result
+        assert result["html"] == "Test HTML"

         # Expect that the problem was reset
         block.new_lcp.assert_called_once_with(None)

@@ -1508,7 +1541,7 @@ def test_reset_problem_closed(self):
         block = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS)

         # Simulate that the problem is closed
-        with patch('xmodule.capa_block.ProblemBlock.closed') as mock_closed:
+        with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed:
             mock_closed.return_value = True

             # Try to reset the problem
@@ -1516,7 +1549,7 @@ def test_reset_problem_closed(self):
             result = block.reset_problem(get_request_dict)

         # Expect that the problem was NOT reset
-        assert (('success' in result) and (not result['success']))
+        assert ("success" in result) and (not result["success"])

     def test_reset_problem_not_done(self):
         # Simulate that the problem is NOT done
@@ -1527,7 +1560,7 @@ def test_reset_problem_not_done(self):
         result = block.reset_problem(get_request_dict)

         # Expect that the problem was NOT reset
-        assert (('success' in result) and (not result['success']))
+        assert ("success" in result) and (not result["success"])

     def test_rescore_problem_correct(self):
@@ -1535,17 +1568,17 @@ def test_rescore_problem_correct(self):

         # Simulate that all answers are marked correct, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
-        with patch('xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
+        with patch("xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers:
             mock_evaluate_answers.return_value = CorrectMap(
                 answer_id=CapaFactory.answer_key(),
-                correctness='correct',
+                correctness="correct",
                 npoints=1,
             )
-            with patch('xmodule.capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
+            with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct:
                 mock_is_correct.return_value = True

                 # Check the problem
-                get_request_dict = {CapaFactory.input_key(): '1'}
+                get_request_dict = {CapaFactory.input_key(): "1"}
                 block.submit_problem(get_request_dict)
                 block.rescore(only_if_higher=False)

@@ -1555,7 +1588,7 @@ def test_rescore_problem_correct(self):
         # Expect that the number of attempts is not incremented
         assert block.attempts == 1
         # and that this was considered attempt number 1 for grading purposes
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1

     def test_rescore_problem_additional_correct(self):
         # make sure it also works when new correct answer has been added
@@ -1563,17 +1596,17 @@ def test_rescore_problem_additional_correct(self):
         answer_id = CapaFactory.answer_key()

         # Check the problem
-        get_request_dict = {CapaFactory.input_key(): '1'}
+        get_request_dict = {CapaFactory.input_key(): "1"}
         result = block.submit_problem(get_request_dict)

         # Expect that the problem is marked incorrect and user didn't earn score
-        assert result['success'] == 'incorrect'
+        assert result["success"] == "incorrect"
         assert block.get_score() == (0, 1)
-        assert block.correct_map[answer_id]['correctness'] == 'incorrect'
+        assert block.correct_map[answer_id]["correctness"] == "incorrect"

         # Expect that the number of attempts has incremented to 1
         assert block.attempts == 1
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1

         # Simulate that after making an incorrect answer to the correct answer
         # the new calculated score is (1,1)
@@ -1581,20 +1614,20 @@ def test_rescore_problem_additional_correct(self):
         # In case of rescore with only_if_higher=True it should update score of block
         # if previous score was lower
-        with patch('xmodule.capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
+        with patch("xmodule.capa.correctmap.CorrectMap.is_correct") as mock_is_correct:
             mock_is_correct.return_value = True
             block.set_score(block.score_from_lcp(block.lcp))
-            with patch('xmodule.capa.responsetypes.NumericalResponse.get_staff_ans') as get_staff_ans:
+            with patch("xmodule.capa.responsetypes.NumericalResponse.get_staff_ans") as get_staff_ans:
                 get_staff_ans.return_value = 1 + 0j
                 block.rescore(only_if_higher=True)

         # Expect that the problem is marked correct and user earned the score
         assert block.get_score() == (1, 1)
-        assert block.correct_map[answer_id]['correctness'] == 'correct'
+        assert block.correct_map[answer_id]["correctness"] == "correct"

         # Expect that the number of attempts is not incremented
         assert block.attempts == 1
         # and hence that this was still considered the first attempt for grading purposes
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1

     def test_rescore_problem_incorrect(self):
         # make sure it also works when attempts have been reset,
@@ -1603,8 +1636,8 @@ def test_rescore_problem_incorrect(self):

         # Simulate that all answers are marked incorrect, no matter
         # what the input is, by patching LoncapaResponse.evaluate_answers()
-        with patch('xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
-            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
+        with patch("xmodule.capa.responsetypes.LoncapaResponse.evaluate_answers") as mock_evaluate_answers:
+            mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), "incorrect")
             block.rescore(only_if_higher=False)

         # Expect that the problem is marked incorrect
@@ -1613,9 +1646,9 @@ def test_rescore_problem_incorrect(self):
         # Expect that the number of attempts is not incremented
         assert block.attempts == 0
         # and that this is treated as the first attempt for grading purposes
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1

-    @patch('xmodule.capa_block.ProblemBlock.get_rescore_with_grading_method')
+    @patch("xmodule.capa_block.ProblemBlock.get_rescore_with_grading_method")
     def test_rescore_problem_with_grading_method_disable(self, mock_get_rescore: Mock):
         """
         Test the rescore method with grading method disabled.
@@ -1626,7 +1659,7 @@ def test_rescore_problem_with_grading_method_disable(self, mock_get_rescore: Moc
         block.rescore(only_if_higher=False)

         assert block.attempts == 0
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1
         mock_get_rescore.assert_not_called()

     @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
@@ -1638,16 +1671,16 @@ def test_rescore_problem_with_grading_method_enable(self):
         block = CapaFactory.create(attempts=0, done=True)

         with patch.object(
-            ProblemBlock, 'get_rescore_with_grading_method', wraps=block.get_rescore_with_grading_method
+            ProblemBlock, "get_rescore_with_grading_method", wraps=block.get_rescore_with_grading_method
         ) as mock_get_rescore:
             block.rescore(only_if_higher=False)

         assert block.attempts == 0
-        assert block.lcp.context['attempt'] == 1
+        assert block.lcp.context["attempt"] == 1
         mock_get_rescore.assert_called()

-    @patch('xmodule.capa_block.ProblemBlock.publish_grade')
+    @patch("xmodule.capa_block.ProblemBlock.publish_grade")
     def test_rescore_problem_grading_method_disable_to_enable(self, mock_publish_grade: Mock):
         """
         Test the rescore method when the grading method is disabled and then enabled.
@@ -1657,20 +1690,18 @@ def test_rescore_problem_grading_method_disable_to_enable(self, mock_publish_gra
         """
         block = CapaFactory.create(attempts=0, max_attempts=3)

-        get_request_dict = {CapaFactory.input_key(): '3.21'}
+        get_request_dict = {CapaFactory.input_key(): "3.21"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.45'}
+        get_request_dict = {CapaFactory.input_key(): "3.45"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)

         # Disabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=False
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=False
         ):
             # Score is the last score
             assert block.score == Score(raw_earned=1, raw_possible=1)
@@ -1678,46 +1709,38 @@ def test_rescore_problem_grading_method_disable_to_enable(self, mock_publish_gra
             block.rescore(only_if_higher=False)

             # Still Score is the last score
-            mock_publish_grade.assert_called_with(
-                score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
-            )
+            mock_publish_grade.assert_called_with(score=Score(raw_earned=1, raw_possible=1), only_if_higher=False)

         # Enabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=True
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=True
         ):
             with patch(
-                'xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled',
+                "xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled",
                 new_callable=PropertyMock,
-                return_value=True
+                return_value=True,
             ):
                 # Change grading method to 'first_score'
-                block.grading_method = 'first_score'
+                block.grading_method = "first_score"
                 block.rescore(only_if_higher=False)

-                mock_publish_grade.assert_called_with(
-                    score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
-                )
+                mock_publish_grade.assert_called_with(score=Score(raw_earned=0, raw_possible=1), only_if_higher=False)

                 # Change grading method to 'highest_score'
-                block.grading_method = 'highest_score'
+                block.grading_method = "highest_score"
                 block.rescore(only_if_higher=False)

-                mock_publish_grade.assert_called_with(
-                    score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
-                )
+                mock_publish_grade.assert_called_with(score=Score(raw_earned=1, raw_possible=1), only_if_higher=False)

                 # Change grading method to 'average_score'
-                block.grading_method = 'average_score'
+                block.grading_method = "average_score"
                 block.rescore(only_if_higher=False)

                 mock_publish_grade.assert_called_with(
                     score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False
                 )

-    @patch('xmodule.capa_block.ProblemBlock.publish_grade')
+    @patch("xmodule.capa_block.ProblemBlock.publish_grade")
     def test_rescore_problem_grading_method_enable_to_disable(self, mock_publish_grade: Mock):
         """
         Test the rescore method when the grading method is enabled and then disabled.
@@ -1727,48 +1750,42 @@ def test_rescore_problem_grading_method_enable_to_disable(self, mock_publish_gra
         """
         block = CapaFactory.create(attempts=0, max_attempts=3)

-        get_request_dict = {CapaFactory.input_key(): '3.21'}
+        get_request_dict = {CapaFactory.input_key(): "3.21"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.45'}
+        get_request_dict = {CapaFactory.input_key(): "3.45"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)

         # Enabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=True
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=True
         ):
             with patch(
-                'xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled',
+                "xmodule.capa.capa_problem.LoncapaProblem.is_grading_method_enabled",
                 new_callable=PropertyMock,
-                return_value=True
+                return_value=True,
             ):
                 # Grading method is 'last_score'
-                assert block.grading_method == 'last_score'
+                assert block.grading_method == "last_score"
                 assert block.score == Score(raw_earned=1, raw_possible=1)

                 # Change grading method to 'first_score'
-                block.grading_method = 'first_score'
+                block.grading_method = "first_score"
                 block.rescore(only_if_higher=False)

-                mock_publish_grade.assert_called_with(
-                    score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
-                )
+                mock_publish_grade.assert_called_with(score=Score(raw_earned=0, raw_possible=1), only_if_higher=False)

                 # Change grading method to 'highest_score'
-                block.grading_method = 'highest_score'
+                block.grading_method = "highest_score"
                 block.rescore(only_if_higher=False)

-                mock_publish_grade.assert_called_with(
-                    score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
-                )
+                mock_publish_grade.assert_called_with(score=Score(raw_earned=1, raw_possible=1), only_if_higher=False)

                 # Change grading method to 'average_score'
-                block.grading_method = 'average_score'
+                block.grading_method = "average_score"
                 block.rescore(only_if_higher=False)

                 mock_publish_grade.assert_called_with(
@@ -1777,16 +1794,14 @@ def test_rescore_problem_grading_method_enable_to_disable(self, mock_publish_gra

         # Disabled grading method
         with patch(
-            'xmodule.capa_block.ProblemBlock.is_grading_method_enabled',
-            new_callable=PropertyMock,
-            return_value=False
+            "xmodule.capa_block.ProblemBlock.is_grading_method_enabled", new_callable=PropertyMock, return_value=False
         ):
             block.rescore(only_if_higher=False)

             # The score is the last score
             assert block.score == Score(raw_earned=1, raw_possible=1)

     @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
-    @patch('xmodule.capa_block.ProblemBlock.publish_grade')
+    @patch("xmodule.capa_block.ProblemBlock.publish_grade")
     def test_rescore_problem_update_grading_method(self, mock_publish_grade: Mock):
         """
         Test the rescore method when the grading method is updated.
@@ -1796,42 +1811,36 @@ def test_rescore_problem_update_grading_method(self, mock_publish_grade: Mock):
         """
         block = CapaFactory.create(attempts=0, max_attempts=3)

-        get_request_dict = {CapaFactory.input_key(): '3.21'}
+        get_request_dict = {CapaFactory.input_key(): "3.21"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.45'}
+        get_request_dict = {CapaFactory.input_key(): "3.45"}
         block.submit_problem(get_request_dict)
-        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        get_request_dict = {CapaFactory.input_key(): "3.14"}
         block.submit_problem(get_request_dict)

         # Grading method is 'last_score'
-        assert block.grading_method == 'last_score'
+        assert block.grading_method == "last_score"
         assert block.score == Score(raw_earned=1, raw_possible=1)

         # Change grading method to 'first_score'
-        block.grading_method = 'first_score'
+        block.grading_method = "first_score"
         block.rescore(only_if_higher=False)

-        mock_publish_grade.assert_called_with(
-            score=Score(raw_earned=0, raw_possible=1), only_if_higher=False
-        )
+        mock_publish_grade.assert_called_with(score=Score(raw_earned=0, raw_possible=1), only_if_higher=False)

         # Change grading method to 'highest_score'
-        block.grading_method = 'highest_score'
+        block.grading_method = "highest_score"
         block.rescore(only_if_higher=False)

-        mock_publish_grade.assert_called_with(
-            score=Score(raw_earned=1, raw_possible=1), only_if_higher=False
-        )
+        mock_publish_grade.assert_called_with(score=Score(raw_earned=1, raw_possible=1), only_if_higher=False)

         # Change grading method to 'average_score'
-        block.grading_method = 'average_score'
+        block.grading_method = "average_score"
         block.rescore(only_if_higher=False)

-        mock_publish_grade.assert_called_with(
-            score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False
-        )
+        mock_publish_grade.assert_called_with(score=Score(raw_earned=0.33, raw_possible=1), only_if_higher=False)

     def test_rescore_problem_not_done(self):
         # Simulate that the problem is NOT done
@@ -1845,7 +1854,7 @@ def test_rescore_problem_not_supported(self):
         block = CapaFactory.create(done=True)

         # Try to rescore the problem, and get exception
-        with patch('xmodule.capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
+        with patch("xmodule.capa.capa_problem.LoncapaProblem.supports_rescoring") as mock_supports_rescoring:
             mock_supports_rescoring.return_value = False
             with pytest.raises(NotImplementedError):
                 block.rescore(only_if_higher=False)

@@ -1855,10 +1864,10 @@ def test_calculate_score_list(self):
         Test that the `calculate_score_list` method returns the correct list of scores.
""" block = CapaFactory.create(correct=True) - correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1) + correct_map = CorrectMap(answer_id="1_2_1", correctness="correct", npoints=1) block.lcp.correct_map_history = [correct_map, correct_map] - with patch.object(block.lcp, 'calculate_score', return_value={'score': 1, 'total': 2}): + with patch.object(block.lcp, "calculate_score", return_value={"score": 1, "total": 2}): result = block.calculate_score_list() expected_result = [Score(raw_earned=1, raw_possible=2), Score(raw_earned=1, raw_possible=2)] self.assertEqual(result, expected_result) @@ -1873,7 +1882,7 @@ def test_calculate_score_list_empty(self): block = CapaFactory.create(correct=True) block.lcp.correct_map_history = [] - with patch.object(block.lcp, 'calculate_score', return_value=Mock()): + with patch.object(block.lcp, "calculate_score", return_value=Mock()): result = block.calculate_score_list() self.assertEqual(result, []) block.lcp.calculate_score.assert_not_called() @@ -1886,21 +1895,21 @@ def test_update_correctness_list_updates_attempt(self): block.update_correctness_list() - self.assertEqual(block.lcp.context['attempt'], 1) + self.assertEqual(block.lcp.context["attempt"], 1) def test_update_correctness_list_with_history(self): """ Test that the `update_correctness_list` method updates the correct map history. """ block = CapaFactory.create(correct=True, attempts=2) - correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=1) - student_answers = {'1_2_1': 'abcd'} + correct_map = CorrectMap(answer_id="1_2_1", correctness="correct", npoints=1) + student_answers = {"1_2_1": "abcd"} block.correct_map_history = [correct_map] block.student_answers_history = [student_answers] - with patch.object(block.lcp, 'get_grade_from_current_answers', return_value=correct_map): + with patch.object(block.lcp, "get_grade_from_current_answers", return_value=correct_map): block.update_correctness_list() - self.assertEqual(block.lcp.context['attempt'], 2) + self.assertEqual(block.lcp.context["attempt"], 2) block.lcp.get_grade_from_current_answers.assert_called_once_with(student_answers, correct_map) self.assertEqual(block.lcp.correct_map_history, [correct_map]) self.assertEqual(block.lcp.correct_map.get_dict(), correct_map.get_dict()) @@ -1916,9 +1925,9 @@ def test_update_correctness_list_without_history(self): block.correct_map_history = [] block.student_answers_history = [] - with patch.object(block.lcp, 'get_grade_from_current_answers', return_value=Mock()): + with patch.object(block.lcp, "get_grade_from_current_answers", return_value=Mock()): block.update_correctness_list() - self.assertEqual(block.lcp.context['attempt'], 1) + self.assertEqual(block.lcp.context["attempt"], 1) block.lcp.get_grade_from_current_answers.assert_not_called() @override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS) @@ -1927,9 +1936,9 @@ def test_get_rescore_with_grading_method(self): Test that the `get_rescore_with_grading_method` method returns the correct score. 
""" block = CapaFactory.create(done=True, attempts=0, max_attempts=2) - get_request_dict = {CapaFactory.input_key(): '3.21'} + get_request_dict = {CapaFactory.input_key(): "3.21"} block.submit_problem(get_request_dict) - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) result = block.get_rescore_with_grading_method() @@ -1942,9 +1951,9 @@ def test_get_score_with_grading_method(self): returns the correct score based on the grading method. """ block = CapaFactory.create(done=True, attempts=0, max_attempts=2) - get_request_dict = {CapaFactory.input_key(): '3.21'} + get_request_dict = {CapaFactory.input_key(): "3.21"} block.submit_problem(get_request_dict) - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) expected_score = Score(raw_earned=1, raw_possible=1) @@ -1953,7 +1962,7 @@ def test_get_score_with_grading_method(self): self.assertEqual(score, expected_score) self.assertEqual(block.score, expected_score) - @patch('xmodule.capa_block.ProblemBlock.score_from_lcp') + @patch("xmodule.capa_block.ProblemBlock.score_from_lcp") def test_get_score_with_grading_method_updates_score(self, mock_score_from_lcp: Mock): """ Test that the `get_score_with_grading_method` method returns the correct score. @@ -1978,7 +1987,7 @@ def test_get_score_with_grading_method_calls_grading_method_handler(self): block = CapaFactory.create(attempts=1) current_score = Score(raw_earned=0, raw_possible=1) - with patch('xmodule.capa_block.GradingMethodHandler') as mock_handler: + with patch("xmodule.capa_block.GradingMethodHandler") as mock_handler: mock_handler.return_value.get_score.return_value = current_score block.get_score_with_grading_method(current_score) mock_handler.assert_called_once_with( @@ -1993,6 +2002,7 @@ class CustomCapaFactory(CapaFactory): """ A factory for creating a Capa problem with arbitrary xml. """ + sample_problem_xml = textwrap.dedent(xml) return CustomCapaFactory @@ -2000,16 +2010,18 @@ class CustomCapaFactory(CapaFactory): def test_codejail_error_upon_problem_creation(self): # Simulate a codejail safe_exec failure upon problem creation. # Create a problem with some script attached. - xml_str = textwrap.dedent(""" + xml_str = textwrap.dedent( + """ - """) + """ + ) factory = self.capa_factory_for_problem_xml(xml_str) # When codejail safe_exec fails upon problem creation, a LoncapaProblemError should be raised. 
with pytest.raises(LoncapaProblemError): - with patch('xmodule.capa.capa_problem.safe_exec') as mock_safe_exec: + with patch("xmodule.capa.capa_problem.safe_exec") as mock_safe_exec: mock_safe_exec.side_effect = SafeExecException() factory.create() @@ -2020,19 +2032,19 @@ def _rescore_problem_error_helper(self, exception_class): CapaFactory.answer_key() # Check the problem - get_request_dict = {CapaFactory.input_key(): '1'} + get_request_dict = {CapaFactory.input_key(): "1"} block.submit_problem(get_request_dict) # Simulate answering a problem that raises the exception - with patch('xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers') as mock_rescore: - mock_rescore.side_effect = exception_class('test error \u03a9') + with patch("xmodule.capa.capa_problem.LoncapaProblem.get_grade_from_current_answers") as mock_rescore: + mock_rescore.side_effect = exception_class("test error \u03a9") with pytest.raises(exception_class): block.rescore(only_if_higher=False) # Expect that the number of attempts is NOT incremented assert block.attempts == 1 # and that this was considered the first attempt for grading purposes - assert block.lcp.context['attempt'] == 1 + assert block.lcp.context["attempt"] == 1 def test_rescore_problem_student_input_error(self): self._rescore_problem_error_helper(StudentInputError) @@ -2047,68 +2059,61 @@ def test_save_problem(self): block = CapaFactory.create(done=False) # Save the problem - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.save_problem(get_request_dict) # Expect that answers are saved to the problem - expected_answers = {CapaFactory.answer_key(): '3.14'} + expected_answers = {CapaFactory.answer_key(): "3.14"} assert block.lcp.student_answers == expected_answers # Expect that the result is success - assert (('success' in result) and result['success']) + assert ("success" in result) and result["success"] def test_save_problem_closed(self): block = CapaFactory.create(done=False) # Simulate that the problem is closed - with patch('xmodule.capa_block.ProblemBlock.closed') as mock_closed: + with patch("xmodule.capa_block.ProblemBlock.closed") as mock_closed: mock_closed.return_value = True # Try to save the problem - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.save_problem(get_request_dict) # Expect that the result is failure - assert (('success' in result) and (not result['success'])) + assert ("success" in result) and (not result["success"]) - @ddt.data( - RANDOMIZATION.ALWAYS, - 'true' - ) + @ddt.data(RANDOMIZATION.ALWAYS, "true") def test_save_problem_submitted_with_randomize(self, rerandomize): # Capa XModule treats 'always' and 'true' equivalently block = CapaFactory.create(rerandomize=rerandomize, done=True) # Try to save - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.save_problem(get_request_dict) # Expect that we cannot save - assert (('success' in result) and (not result['success'])) + assert ("success" in result) and (not result["success"]) - @ddt.data( - RANDOMIZATION.NEVER, - 'false', - RANDOMIZATION.PER_STUDENT - ) + @ddt.data(RANDOMIZATION.NEVER, "false", RANDOMIZATION.PER_STUDENT) def test_save_problem_submitted_no_randomize(self, rerandomize): # Capa XBlock treats 'false' and 'per_student' equivalently block = CapaFactory.create(rerandomize=rerandomize, done=True) # Try to save - get_request_dict = 
{CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} result = block.save_problem(get_request_dict) # Expect that we succeed - assert (('success' in result) and result['success']) + assert ("success" in result) and result["success"] def test_submit_button_name(self): block = CapaFactory.create(attempts=0) - assert block.submit_button_name() == 'Submit' + assert block.submit_button_name() == "Submit" def test_submit_button_submitting_name(self): block = CapaFactory.create(attempts=1, max_attempts=10) - assert block.submit_button_submitting_name() == 'Submitting' + assert block.submit_button_submitting_name() == "Submitting" def test_should_enable_submit_button(self): @@ -2249,34 +2254,25 @@ def test_should_show_save_button(self): def test_should_show_save_button_force_save_button(self): # If we're after the deadline, do NOT show the save button # even though we're forcing a save - block = CapaFactory.create(due=self.yesterday_str, - force_save_button="true", - done=True) + block = CapaFactory.create(due=self.yesterday_str, force_save_button="true", done=True) assert not block.should_show_save_button() # If the user is out of attempts, do NOT show the save button attempts = random.randint(1, 10) - block = CapaFactory.create(attempts=attempts, - max_attempts=attempts, - force_save_button="true", - done=True) + block = CapaFactory.create(attempts=attempts, max_attempts=attempts, force_save_button="true", done=True) assert not block.should_show_save_button() # Otherwise, if we force the save button, # then show it even if we would ordinarily # require a reset first - block = CapaFactory.create(force_save_button="true", - rerandomize=RANDOMIZATION.ALWAYS, - done=True) + block = CapaFactory.create(force_save_button="true", rerandomize=RANDOMIZATION.ALWAYS, done=True) assert block.should_show_save_button() - block = CapaFactory.create(force_save_button="true", - rerandomize="true", - done=True) + block = CapaFactory.create(force_save_button="true", rerandomize="true", done=True) assert block.should_show_save_button() def test_no_max_attempts(self): - block = CapaFactory.create(max_attempts='') + block = CapaFactory.create(max_attempts="") html = block.get_problem_html() assert html is not None # assert that we got here without exploding @@ -2296,7 +2292,7 @@ def test_get_problem_html(self): block.should_show_save_button = Mock(return_value=show_save_button) # Patch the capa problem's HTML rendering - with patch('xmodule.capa.capa_problem.LoncapaProblem.get_html') as mock_html: + with patch("xmodule.capa.capa_problem.LoncapaProblem.get_html") as mock_html: mock_html.return_value = "
                    Test Problem HTML
                    " # Render the problem HTML @@ -2306,21 +2302,21 @@ def test_get_problem_html(self): html_encapsulated = block.get_problem_html(encapsulate=True) # Expect that we get the rendered template back - assert html == '
                    Test Template HTML
                    ' + assert html == "
                    Test Template HTML
                    " # Check the rendering context render_args, _ = render_template.call_args assert len(render_args) == 2 template_name = render_args[0] - assert template_name == 'problem.html' + assert template_name == "problem.html" context = render_args[1] - assert context['problem']['html'] == '
                    Test Problem HTML
                    ' - assert bool(context['should_enable_submit_button']) == enable_submit_button - assert bool(context['reset_button']) == show_reset_button - assert bool(context['save_button']) == show_save_button - assert not context['demand_hint_possible'] + assert context["problem"]["html"] == "
                    Test Problem HTML
                    " + assert bool(context["should_enable_submit_button"]) == enable_submit_button + assert bool(context["reset_button"]) == show_reset_button + assert bool(context["save_button"]) == show_save_button + assert not context["demand_hint_possible"] # Assert that the encapsulated html contains the original html assert html in html_encapsulated @@ -2348,21 +2344,21 @@ def test_demand_hint(self): block = CapaFactory.create(xml=self.demand_xml, render_template=render_template) block.get_problem_html() # ignoring html result context = render_template.call_args[0][1] - assert context['demand_hint_possible'] - assert context['should_enable_next_hint'] + assert context["demand_hint_possible"] + assert context["should_enable_next_hint"] # Check the AJAX call that gets the hint by index result = block.get_demand_hint(0) - assert result['hint_index'] == 0 - assert result['should_enable_next_hint'] + assert result["hint_index"] == 0 + assert result["should_enable_next_hint"] result = block.get_demand_hint(1) - assert result['hint_index'] == 1 - assert not result['should_enable_next_hint'] + assert result["hint_index"] == 1 + assert not result["should_enable_next_hint"] result = block.get_demand_hint(2) # here the server wraps around to index 0 - assert result['hint_index'] == 0 - assert result['should_enable_next_hint'] + assert result["hint_index"] == 0 + assert result["should_enable_next_hint"] def test_single_demand_hint(self): """ @@ -2386,13 +2382,13 @@ def test_single_demand_hint(self): block = CapaFactory.create(xml=test_xml, render_template=render_template) block.get_problem_html() # ignoring html result context = render_template.call_args[0][1] - assert context['demand_hint_possible'] - assert context['should_enable_next_hint'] + assert context["demand_hint_possible"] + assert context["should_enable_next_hint"] # Check the AJAX call that gets the hint by index result = block.get_demand_hint(0) - assert result['hint_index'] == 0 - assert not result['should_enable_next_hint'] + assert result["hint_index"] == 0 + assert not result["should_enable_next_hint"] def test_image_hint(self): """ @@ -2418,26 +2414,26 @@ def test_image_hint(self): block = CapaFactory.create(xml=test_xml, render_template=render_template) block.get_problem_html() # ignoring html result context = render_template.call_args[0][1] - assert context['demand_hint_possible'] - assert context['should_enable_next_hint'] + assert context["demand_hint_possible"] + assert context["should_enable_next_hint"] # Check the AJAX call that gets the hint by index result = block.get_demand_hint(0) - assert result['hint_index'] == 0 - assert not result['should_enable_next_hint'] + assert result["hint_index"] == 0 + assert not result["should_enable_next_hint"] def test_demand_hint_logging(self): """ Test calling get_demand_hunt() results in an event being published. 
""" block = CapaFactory.create(xml=self.demand_xml) - with patch.object(block.runtime, 'publish') as mock_publish: + with patch.object(block.runtime, "publish") as mock_publish: block.get_problem_html() block.get_demand_hint(0) mock_publish.assert_called_with( - block, 'edx.problem.hint.demandhint_displayed', - {'hint_index': 0, 'module_id': str(block.location), - 'hint_text': 'Demand 1', 'hint_len': 2} + block, + "edx.problem.hint.demandhint_displayed", + {"hint_index": 0, "module_id": str(block.location), "hint_text": "Demand 1", "hint_len": 2}, ) def test_input_state_consistency(self): @@ -2477,7 +2473,7 @@ def test_get_problem_html_error(self): # Check the rendering context render_args, _ = render_template.call_args context = render_args[1] - assert 'error' in context['problem']['html'] + assert "error" in context["problem"]["html"] # Expect that the block has created a new dummy problem with the error assert original_problem != block.lcp @@ -2504,7 +2500,7 @@ def test_get_problem_html_error_preview(self): # Check the rendering context render_args, _ = render_template.call_args context = render_args[1] - assert error_msg in context['problem']['html'] + assert error_msg in context["problem"]["html"] @override_settings(DEBUG=True) def test_get_problem_html_error_w_debug(self): @@ -2527,15 +2523,10 @@ def test_get_problem_html_error_w_debug(self): # Check the rendering context render_args, _ = render_template.call_args context = render_args[1] - assert error_msg in context['problem']['html'] + assert error_msg in context["problem"]["html"] @ddt.data( - 'false', - 'true', - RANDOMIZATION.NEVER, - RANDOMIZATION.PER_STUDENT, - RANDOMIZATION.ALWAYS, - RANDOMIZATION.ONRESET + "false", "true", RANDOMIZATION.NEVER, RANDOMIZATION.PER_STUDENT, RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET ) def test_random_seed_no_change(self, rerandomize): @@ -2554,7 +2545,7 @@ def test_random_seed_no_change(self, rerandomize): assert seed == 1, "Seed should always be 1 when rerandomize='%s'" % rerandomize # Check the problem - get_request_dict = {CapaFactory.input_key(): '3.14'} + get_request_dict = {CapaFactory.input_key(): "3.14"} block.submit_problem(get_request_dict) # Expect that the seed is the same @@ -2567,12 +2558,7 @@ def test_random_seed_no_change(self, rerandomize): assert seed == block.seed @ddt.data( - 'false', - 'true', - RANDOMIZATION.NEVER, - RANDOMIZATION.PER_STUDENT, - RANDOMIZATION.ALWAYS, - RANDOMIZATION.ONRESET + "false", "true", RANDOMIZATION.NEVER, RANDOMIZATION.PER_STUDENT, RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET ) def test_random_seed_with_reset(self, rerandomize): """ @@ -2597,13 +2583,13 @@ def _reset_and_get_seed(block): return block.seed def _retry_and_check(num_tries, test_func): - ''' + """ Returns True if *test_func* was successful (returned True) within *num_tries* attempts *test_func* must be a function of the form test_func() -> bool - ''' + """ success = False for __ in range(num_tries): if test_func() is True: @@ -2622,9 +2608,7 @@ def _retry_and_check(num_tries, test_func): # is set to 'never' -- it should still be 1 # The seed also stays the same if we're randomizing # 'per_student': the same student should see the same problem - if rerandomize in [RANDOMIZATION.NEVER, - 'false', - RANDOMIZATION.PER_STUDENT]: + if rerandomize in [RANDOMIZATION.NEVER, "false", RANDOMIZATION.PER_STUDENT]: assert seed == _reset_and_get_seed(block) # Otherwise, we expect the seed to change @@ -2637,16 +2621,11 @@ def _retry_and_check(num_tries, test_func): success = _retry_and_check(60, 
lambda: _reset_and_get_seed(block) != seed) assert block.seed is not None - msg = 'Could not get a new seed from reset after 60 tries' + msg = "Could not get a new seed from reset after 60 tries" assert success, msg @ddt.data( - 'false', - 'true', - RANDOMIZATION.NEVER, - RANDOMIZATION.PER_STUDENT, - RANDOMIZATION.ALWAYS, - RANDOMIZATION.ONRESET + "false", "true", RANDOMIZATION.NEVER, RANDOMIZATION.PER_STUDENT, RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET ) def test_random_seed_with_reset_question_unsubmitted(self, rerandomize): """ @@ -2675,12 +2654,7 @@ def _reset_and_get_seed(block): # the seed should never change because the student hasn't finished the problem assert seed == _reset_and_get_seed(block) - @ddt.data( - RANDOMIZATION.ALWAYS, - RANDOMIZATION.PER_STUDENT, - 'true', - RANDOMIZATION.ONRESET - ) + @ddt.data(RANDOMIZATION.ALWAYS, RANDOMIZATION.PER_STUDENT, "true", RANDOMIZATION.ONRESET) def test_random_seed_bins(self, rerandomize): # Assert that we are limiting the number of possible seeds. # Get a bunch of seeds, they should all be in 0-999. @@ -2690,8 +2664,8 @@ def test_random_seed_bins(self, rerandomize): assert 0 <= block.seed < 1000 i -= 1 - @patch('xmodule.capa_block.log') - @patch('xmodule.capa_block.Progress') + @patch("xmodule.capa_block.log") + @patch("xmodule.capa_block.Progress") def test_get_progress_error(self, mock_progress, mock_log): """ Check that an exception given in `Progress` produces a `log.exception` call. @@ -2701,10 +2675,10 @@ def test_get_progress_error(self, mock_progress, mock_log): mock_progress.side_effect = error_type block = CapaFactory.create() assert block.get_progress() is None - mock_log.exception.assert_called_once_with('Got bad progress') + mock_log.exception.assert_called_once_with("Got bad progress") mock_log.reset_mock() - @patch('xmodule.capa_block.Progress') + @patch("xmodule.capa_block.Progress") def test_get_progress_no_error_if_weight_zero(self, mock_progress): """ Check that if the weight is 0 get_progress does not try to create a Progress object. @@ -2716,7 +2690,7 @@ def test_get_progress_no_error_if_weight_zero(self, mock_progress): assert progress is None assert not mock_progress.called - @patch('xmodule.capa_block.Progress') + @patch("xmodule.capa_block.Progress") def test_get_progress_calculate_progress_fraction(self, mock_progress): """ Check that score and total are calculated correctly for the progress fraction. @@ -2744,9 +2718,7 @@ def test_get_display_progress_show_correctness(self, show_correctness, is_correc """ Check that score and total are calculated correctly for the progress fraction. """ - block = CapaFactory.create(correct=is_correct, - show_correctness=show_correctness, - due=self.tomorrow_str) + block = CapaFactory.create(correct=is_correct, show_correctness=show_correctness, due=self.tomorrow_str) block.weight = 1 score, total = block.get_display_progress() assert score == expected_score @@ -2766,10 +2738,11 @@ def test_get_problem(self): Check that get_problem() returns the expected dictionary. 
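
The randomization tests above check two properties: resetting a submitted, always-randomized problem eventually draws a new seed, and seeds are binned into 0-999 so each problem has a bounded number of variants. A sketch of the binning property, with `choose_seed` as a hypothetical stand-in for the block's reseeding logic:

    import random

    def choose_seed(num_bins: int = 1000) -> int:
        # Draw a seed from a small fixed range of bins.
        return random.randint(0, num_bins - 1)

    assert all(0 <= choose_seed() < 1000 for _ in range(5000))
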
""" block = CapaFactory.create() - assert block.get_problem('data') == {'html': block.get_problem_html(encapsulate=False)} + assert block.get_problem("data") == {"html": block.get_problem_html(encapsulate=False)} # Standard question with shuffle="true" used by a few tests - common_shuffle_xml = textwrap.dedent(""" + common_shuffle_xml = textwrap.dedent( + """ @@ -2780,7 +2753,8 @@ def test_get_problem(self): - """) + """ + ) def test_check_unmask(self): """ @@ -2788,20 +2762,23 @@ def test_check_unmask(self): unmasked names should appear in the publish event_info. """ block = CapaFactory.create(xml=self.common_shuffle_xml) - with patch.object(block.runtime, 'publish') as mock_publish: - get_request_dict = {CapaFactory.input_key(): 'choice_3'} # the correct choice + with patch.object(block.runtime, "publish") as mock_publish: + get_request_dict = {CapaFactory.input_key(): "choice_3"} # the correct choice block.submit_problem(get_request_dict) mock_call = mock_publish.mock_calls[1] event_info = mock_call[1][2] - assert event_info['answers'][CapaFactory.answer_key()] == 'choice_3' + assert event_info["answers"][CapaFactory.answer_key()] == "choice_3" # 'permutation' key added to record how problem was shown - assert event_info['permutation'][CapaFactory.answer_key()] ==\ - ('shuffle', ['choice_3', 'choice_1', 'choice_2', 'choice_0']) - assert event_info['success'] == 'correct' + assert event_info["permutation"][CapaFactory.answer_key()] == ( + "shuffle", + ["choice_3", "choice_1", "choice_2", "choice_0"], + ) + assert event_info["success"] == "correct" def test_check_unmask_answerpool(self): """Check answer-pool question publish uses unmasked names""" - xml = textwrap.dedent(""" + xml = textwrap.dedent( + """ @@ -2812,25 +2789,28 @@ def test_check_unmask_answerpool(self): - """) + """ + ) block = CapaFactory.create(xml=xml) - with patch.object(block.runtime, 'publish') as mock_publish: - get_request_dict = {CapaFactory.input_key(): 'choice_2'} # mask_X form when masking enabled + with patch.object(block.runtime, "publish") as mock_publish: + get_request_dict = {CapaFactory.input_key(): "choice_2"} # mask_X form when masking enabled block.submit_problem(get_request_dict) mock_call = mock_publish.mock_calls[1] event_info = mock_call[1][2] - assert event_info['answers'][CapaFactory.answer_key()] == 'choice_2' + assert event_info["answers"][CapaFactory.answer_key()] == "choice_2" # 'permutation' key added to record how problem was shown - assert event_info['permutation'][CapaFactory.answer_key()] ==\ - ('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0']) - assert event_info['success'] == 'incorrect' + assert event_info["permutation"][CapaFactory.answer_key()] == ( + "answerpool", + ["choice_1", "choice_3", "choice_2", "choice_0"], + ) + assert event_info["success"] == "incorrect" @ddt.unpack @ddt.data( - {'display_name': None, 'expected_display_name': 'problem'}, - {'display_name': '', 'expected_display_name': 'problem'}, - {'display_name': ' ', 'expected_display_name': 'problem'}, - {'display_name': 'CAPA 101', 'expected_display_name': 'CAPA 101'} + {"display_name": None, "expected_display_name": "problem"}, + {"display_name": "", "expected_display_name": "problem"}, + {"display_name": " ", "expected_display_name": "problem"}, + {"display_name": "CAPA 101", "expected_display_name": "CAPA 101"}, ) def test_problem_display_name_with_default(self, display_name, expected_display_name): """ @@ -2840,8 +2820,8 @@ def test_problem_display_name_with_default(self, display_name, 
expected_display_ assert block.display_name_with_default == expected_display_name @ddt.data( - '', - ' ', + "", + " ", ) def test_problem_no_display_name(self, display_name): """ @@ -2852,12 +2832,13 @@ def test_problem_no_display_name(self, display_name): block.get_problem_html() render_args, _ = render_template.call_args context = render_args[1] - assert context['problem']['name'] == block.location.block_type + assert context["problem"]["name"] == block.location.block_type @ddt.ddt class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=missing-class-docstring - sample_checkbox_problem_xml = textwrap.dedent(""" + sample_checkbox_problem_xml = textwrap.dedent( + """

                    Title

                    @@ -2888,9 +2869,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m
                    - """) + """ + ) - sample_dropdown_problem_xml = textwrap.dedent(""" + sample_dropdown_problem_xml = textwrap.dedent( + """

                    Dropdown problems allow learners to select only one option from a list of options.

                    @@ -2915,9 +2898,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m
                    - """) + """ + ) - sample_multichoice_problem_xml = textwrap.dedent(""" + sample_multichoice_problem_xml = textwrap.dedent( + """

                    Multiple choice problems allow learners to select only one option.

                    @@ -2951,9 +2936,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m
                    - """) + """ + ) - sample_numerical_input_problem_xml = textwrap.dedent(""" + sample_numerical_input_problem_xml = textwrap.dedent( + """

                    In a numerical input problem, learners enter numbers or a specific and relatively simple mathematical expression. Learners enter the response in plain text, and the system then converts the text to a symbolic @@ -2991,9 +2978,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m - """) + """ + ) - sample_text_input_problem_xml = textwrap.dedent(""" + sample_text_input_problem_xml = textwrap.dedent( + """

                    In text input problems, also known as "fill-in-the-blank" problems, learners enter text into a response field. The text can include letters and characters such as punctuation marks. The text that the learner @@ -3023,9 +3012,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m - """) + """ + ) - sample_checkboxes_with_hints_and_feedback_problem_xml = textwrap.dedent(""" + sample_checkboxes_with_hints_and_feedback_problem_xml = textwrap.dedent( + """

                    You can provide feedback for each option in a checkbox problem, with distinct feedback depending on whether or not the learner selects that option.

                    @@ -3076,9 +3067,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m A fruit contains seeds of the plant.
                    - """) + """ + ) - sample_dropdown_with_hints_and_feedback_problem_xml = textwrap.dedent(""" + sample_dropdown_with_hints_and_feedback_problem_xml = textwrap.dedent( + """

                    You can provide feedback for each available option in a dropdown problem.

                    @@ -3108,9 +3101,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m A fruit contains seeds of the plant.
                    - """) + """ + ) - sample_multichoice_with_hints_and_feedback_problem_xml = textwrap.dedent(""" + sample_multichoice_with_hints_and_feedback_problem_xml = textwrap.dedent( + """

                    You can provide feedback for each option in a multiple choice problem.

                    @@ -3141,9 +3136,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m A fruit contains seeds of the plant.
                    - """) + """ + ) - sample_numerical_input_with_hints_and_feedback_problem_xml = textwrap.dedent(""" + sample_numerical_input_with_hints_and_feedback_problem_xml = textwrap.dedent( + """

                    You can provide feedback for correct answers in numerical input problems. You cannot provide feedback for incorrect answers.

                    @@ -3177,9 +3174,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m n is the count of items in the set.
                    - """) + """ + ) - sample_text_input_with_hints_and_feedback_problem_xml = textwrap.dedent(""" + sample_text_input_with_hints_and_feedback_problem_xml = textwrap.dedent( + """

                    You can provide feedback for the correct answer in text input problems, as well as for specific incorrect answers.

                    @@ -3208,10 +3207,11 @@ class ProblemBlockXMLTest(unittest.TestCase): # lint-amnesty, pylint: disable=m Consider all 50 states, not just the continental United States.
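
The test class below drives many cases through ddt, where `@ddt.data` on a method generates one test per datum; a self-contained sketch of the idiom (the class name and data values here are illustrative only):

    import unittest

    import ddt

    @ddt.ddt
    class RerandomizeFlagTest(unittest.TestCase):
        @ddt.data("false", "true", "never", "per_student")
        def test_flag_is_string(self, flag):
            # ddt generates one test method per datum listed above.
            self.assertIsInstance(flag, str)
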
                    - """) + """ + ) def _create_block(self, xml, name=None): - """ Creates a ProblemBlock to run test against """ + """Creates a ProblemBlock to run test against""" block = CapaFactory.create() block.data = xml if name: @@ -3220,18 +3220,20 @@ def _create_block(self, xml, name=None): @ddt.data(*sorted(responsetypes.registry.registered_tags())) def test_all_response_types(self, response_tag): - """ Tests that every registered response tag is correctly returned """ + """Tests that every registered response tag is correctly returned""" xml = "<{response_tag}>".format(response_tag=response_tag) name = "Some Capa Problem" block = self._create_block(xml, name=name) assert block.problem_types == {response_tag} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': [response_tag], - 'content': {'display_name': name, 'capa_content': ''}} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": [response_tag], + "content": {"display_name": name, "capa_content": ""}, + } def test_response_types_ignores_non_response_tags(self): - xml = textwrap.dedent(""" + xml = textwrap.dedent( + """

                    Label

                    Some comment
                    @@ -3244,17 +3246,20 @@ def test_response_types_ignores_non_response_tags(self):
                    - """) + """ + ) name = "Test Capa Problem" block = self._create_block(xml, name=name) - assert block.problem_types == {'multiplechoiceresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['multiplechoiceresponse'], - 'content': {'display_name': name, 'capa_content': 'Label Some comment Apple Banana Chocolate Donut'}} + assert block.problem_types == {"multiplechoiceresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["multiplechoiceresponse"], + "content": {"display_name": name, "capa_content": "Label Some comment Apple Banana Chocolate Donut"}, + } def test_response_types_multiple_tags(self): - xml = textwrap.dedent(""" + xml = textwrap.dedent( + """

                    Label

                    Some comment
                    @@ -3272,30 +3277,30 @@ def test_response_types_multiple_tags(self):
                    - """) + """ + ) name = "Other Test Capa Problem" block = self._create_block(xml, name=name) - assert block.problem_types == {'multiplechoiceresponse', 'optionresponse'} + assert block.problem_types == {"multiplechoiceresponse", "optionresponse"} # We are converting problem_types to a set to compare it later without taking into account the order # the reasoning behind is that the problem_types (property) is represented by dict and when it is converted # to list its ordering is different everytime. indexing_result = block.index_dictionary() - indexing_result['problem_types'] = set(indexing_result['problem_types']) + indexing_result["problem_types"] = set(indexing_result["problem_types"]) self.assertDictEqual( - indexing_result, { - 'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': {"optionresponse", "multiplechoiceresponse"}, - 'content': { - 'display_name': name, - 'capa_content': "Label Some comment Donut Buggy '1','2'" - }, - } + indexing_result, + { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": {"optionresponse", "multiplechoiceresponse"}, + "content": {"display_name": name, "capa_content": "Label Some comment Donut Buggy '1','2'"}, + }, ) def test_solutions_not_indexed(self): - xml = textwrap.dedent(""" + xml = textwrap.dedent( + """ Test solution. Test solution with attribute. @@ -3324,18 +3329,21 @@ def test_solutions_not_indexed(self): Test hint. Test hintpart. - """) + """ + ) name = "Blank Common Capa Problem" block = self._create_block(xml, name=name) - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': [], - 'content': {'display_name': name, 'capa_content': ''}} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": [], + "content": {"display_name": name, "capa_content": ""}, + } def test_indexing_checkboxes(self): name = "Checkboxes" block = self._create_block(self.sample_checkbox_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ Title Description Example @@ -3346,32 +3354,38 @@ def test_indexing_checkboxes(self): French Hungarian Note: Make sure you select all of the correct options—there may be more than one! - """) - assert block.problem_types == {'choiceresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['choiceresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"choiceresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["choiceresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_dropdown(self): name = "Dropdown" block = self._create_block(self.sample_dropdown_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ Dropdown problems allow learners to select only one option from a list of options. Description You can use the following example problem as a model. Which of the following countries celebrates its independence on August 15? 
'India','Spain','China','Bermuda' - """) - assert block.problem_types == {'optionresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['optionresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"optionresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["optionresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_multiple_choice(self): name = "Multiple Choice" block = self._create_block(self.sample_multichoice_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ Multiple choice problems allow learners to select only one option. When you add the problem, be sure to select Settings to specify a Display Name and other values. You can use the following example problem as a model. @@ -3380,17 +3394,20 @@ def test_indexing_multiple_choice(self): Germany Indonesia Russia - """) - assert block.problem_types == {'multiplechoiceresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['multiplechoiceresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"multiplechoiceresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["multiplechoiceresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_numerical_input(self): name = "Numerical Input" block = self._create_block(self.sample_numerical_input_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ In a numerical input problem, learners enter numbers or a specific and relatively simple mathematical expression. Learners enter the response in plain text, and the system then converts the text to a symbolic expression that learners can see below the response field. @@ -3402,17 +3419,20 @@ def test_indexing_numerical_input(self): You can use the following example problems as models. How many miles away from Earth is the sun? Use scientific notation to answer. The square of what number is -100? - """) - assert block.problem_types == {'numericalresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['numericalresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"numericalresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["numericalresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_text_input(self): name = "Text Input" block = self._create_block(self.sample_text_input_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ In text input problems, also known as "fill-in-the-blank" problems, learners enter text into a response field. The text can include letters and characters such as punctuation marks. The text that the learner enters must match your specified answer text exactly. You can specify more than one correct answer. 
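
Each indexing test compares against a dedented extract flattened with `replace("\n", " ").strip()`; the idiom in isolation, reusing a sentence from the dropdown sample above:

    import textwrap

    expected = textwrap.dedent(
        """
        Which of the following countries
        celebrates its independence on August 15?
        """
    )
    flattened = expected.replace("\n", " ").strip()
    assert flattened == ("Which of the following countries "
                         "celebrates its independence on August 15?")
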
@@ -3421,31 +3441,36 @@ def test_indexing_text_input(self): apply. You can use the following example problem as a model. What was the first post-secondary school in China to allow both male and female students? - """) - assert block.problem_types == {'stringresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['stringresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"stringresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["stringresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_non_latin_problem(self): - sample_text_input_problem_xml = textwrap.dedent(""" + sample_text_input_problem_xml = textwrap.dedent( + """

                    Δοκιμή με μεταβλητές με Ελληνικούς χαρακτήρες μέσα σε python: $FX1_VAL

                    - """) + """ + ) name = "Non latin Input" block = self._create_block(sample_text_input_problem_xml, name=name) capa_content = "Δοκιμή με μεταβλητές με Ελληνικούς χαρακτήρες μέσα σε python: $FX1_VAL" block_dict = block.index_dictionary() - assert block_dict['content']['capa_content'] == smart_str(capa_content) + assert block_dict["content"]["capa_content"] == smart_str(capa_content) def test_indexing_checkboxes_with_hints_and_feedback(self): name = "Checkboxes with Hints and Feedback" block = self._create_block(self.sample_checkboxes_with_hints_and_feedback_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ You can provide feedback for each option in a checkbox problem, with distinct feedback depending on whether or not the learner selects that option. You can also provide compound feedback for a specific combination of answers. For example, if you have @@ -3459,17 +3484,20 @@ def test_indexing_checkboxes_with_hints_and_feedback(self): pumpkin potato tomato - """) - assert block.problem_types == {'choiceresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['choiceresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"choiceresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["choiceresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_dropdown_with_hints_and_feedback(self): name = "Dropdown with Hints and Feedback" block = self._create_block(self.sample_dropdown_with_hints_and_feedback_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ You can provide feedback for each available option in a dropdown problem. You can also add hints for learners. Be sure to select Settings to specify a Display Name and other values that apply. @@ -3479,17 +3507,20 @@ def test_indexing_dropdown_with_hints_and_feedback(self): pumpkin potato tomato - """) - assert block.problem_types == {'optionresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['optionresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"optionresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["optionresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_multiple_choice_with_hints_and_feedback(self): name = "Multiple Choice with Hints and Feedback" block = self._create_block(self.sample_multichoice_with_hints_and_feedback_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ You can provide feedback for each option in a multiple choice problem. You can also add hints for learners. Be sure to select Settings to specify a Display Name and other values that apply. 
@@ -3499,17 +3530,20 @@ def test_indexing_multiple_choice_with_hints_and_feedback(self): pumpkin potato tomato - """) - assert block.problem_types == {'multiplechoiceresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['multiplechoiceresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"multiplechoiceresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["multiplechoiceresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_numerical_input_with_hints_and_feedback(self): name = "Numerical Input with Hints and Feedback" block = self._create_block(self.sample_numerical_input_with_hints_and_feedback_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ You can provide feedback for correct answers in numerical input problems. You cannot provide feedback for incorrect answers. Use feedback for the correct answer to reinforce the process for arriving at the numerical value. @@ -3517,17 +3551,20 @@ def test_indexing_numerical_input_with_hints_and_feedback(self): Be sure to select Settings to specify a Display Name and other values that apply. Use the following example problem as a model. What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5) - """) - assert block.problem_types == {'numericalresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['numericalresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"numericalresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["numericalresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_text_input_with_hints_and_feedback(self): name = "Text Input with Hints and Feedback" block = self._create_block(self.sample_text_input_with_hints_and_feedback_problem_xml, name=name) - capa_content = textwrap.dedent(""" + capa_content = textwrap.dedent( + """ You can provide feedback for the correct answer in text input problems, as well as for specific incorrect answers. Use feedback on expected incorrect answers to address common misconceptions and to provide guidance on @@ -3535,15 +3572,18 @@ def test_indexing_text_input_with_hints_and_feedback(self): Be sure to select Settings to specify a Display Name and other values that apply. Use the following example problem as a model. Which U.S. state has the largest land area? 
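
For reference, this is the payload shape the `index_dictionary` assertions keep comparing against; the values here are hypothetical, and `"CAPA"` is an assumed value of `ProblemBlock.INDEX_CONTENT_TYPE`:

    index = {
        "content_type": "CAPA",  # assumed value of ProblemBlock.INDEX_CONTENT_TYPE
        "problem_types": ["stringresponse"],
        "content": {"display_name": "Text Input", "capa_content": "..."},
    }
    assert set(index) == {"content_type", "problem_types", "content"}
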
- """) - assert block.problem_types == {'stringresponse'} - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['stringresponse'], - 'content': {'display_name': name, 'capa_content': capa_content.replace('\n', ' ').strip()}} + """ + ) + assert block.problem_types == {"stringresponse"} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["stringresponse"], + "content": {"display_name": name, "capa_content": capa_content.replace("\n", " ").strip()}, + } def test_indexing_problem_with_html_tags(self): - sample_problem_xml = textwrap.dedent(""" + sample_problem_xml = textwrap.dedent( + """ @@ -3556,14 +3596,16 @@ def test_indexing_problem_with_html_tags(self): var alive; - """) + """ + ) name = "Mixed business" block = self._create_block(sample_problem_xml, name=name) capa_content = "This has HTML comment in it. HTML end." - assert block.index_dictionary() ==\ - {'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': [], - 'content': {'display_name': name, 'capa_content': capa_content}} + assert block.index_dictionary() == { + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": [], + "content": {"display_name": name, "capa_content": capa_content}, + } def test_indexing_problem_with_no_whitespace_between_tags(self): """ @@ -3571,10 +3613,10 @@ def test_indexing_problem_with_no_whitespace_between_tags(self): We want to make sure the index description is still readable and has whitespace. """ sample_problem_xml = ( - "" + '' "
                    Question text here.
                    " - "
                    Option A
                    " - "
                    Option B
                    " + '
                    Option A
                    ' + '
                    Option B
                    ' "
                    " "
                    " ) @@ -3582,9 +3624,9 @@ def test_indexing_problem_with_no_whitespace_between_tags(self): block = self._create_block(sample_problem_xml, name=name) capa_content = "Question text here. Option A Option B" assert block.index_dictionary() == { - 'content_type': ProblemBlock.INDEX_CONTENT_TYPE, - 'problem_types': ['choiceresponse'], - 'content': {'display_name': name, 'capa_content': capa_content}, + "content_type": ProblemBlock.INDEX_CONTENT_TYPE, + "problem_types": ["choiceresponse"], + "content": {"display_name": name, "capa_content": capa_content}, } def test_invalid_xml_handling(self): @@ -3592,10 +3634,12 @@ def test_invalid_xml_handling(self): Tests to confirm that invalid XML throws errors during xblock creation, so as not to allow bad data into modulestore. """ - sample_invalid_xml = textwrap.dedent(""" + sample_invalid_xml = textwrap.dedent( + """

                    You can use this template as a guide to the simple editor markdown and OLX markup to use for dropdown @@ -3617,7 +3662,8 @@ def test_invalid_dropdown_xml(self): - """) + """ + ) with pytest.raises(Exception): CapaFactory.create(xml=problem_xml) @@ -3629,9 +3675,9 @@ def test_default(self): Check that complex numbers can be encoded into JSON. """ complex_num = 1 - 1j - expected_str = '1-1*j' + expected_str = "1-1*j" json_str = json.dumps(complex_num, cls=ComplexEncoder) - assert expected_str == json_str[1:(- 1)] + assert expected_str == json_str[1:(-1)] # ignore quotes @@ -3677,50 +3723,61 @@ def test_choice_answer_text(self): """ # Whitespace screws up comparisons - xml = ''.join(line.strip() for line in xml.split('\n')) + xml = "".join(line.strip() for line in xml.split("\n")) factory = self.capa_factory_for_problem_xml(xml) block = factory.create() answer_input_dict = { - factory.input_key(2): 'blue', - factory.input_key(3): 'choice_0', - factory.input_key(4): ['choice_0', 'choice_1'], + factory.input_key(2): "blue", + factory.input_key(3): "choice_0", + factory.input_key(4): ["choice_0", "choice_1"], } event = self.get_event_for_answers(block, answer_input_dict) - assert event['submission'] ==\ - {factory.answer_key(2): {'question': 'What color is the open ocean on a sunny day?', - 'answer': 'blue', 'response_type': 'optionresponse', - 'input_type': 'optioninput', - 'correct': True, - 'group_label': '', - 'variant': ''}, - factory.answer_key(3): {'question': 'Which piece of furniture is built for sitting?', - 'answer': 'a table', - 'response_type': 'multiplechoiceresponse', - 'input_type': 'choicegroup', - 'correct': False, - 'group_label': '', - 'variant': ''}, - factory.answer_key(4): {'question': 'Which of the following are musical instruments?', - 'answer': ['a piano', 'a tree'], - 'response_type': 'choiceresponse', - 'input_type': 'checkboxgroup', - 'correct': False, - 'group_label': '', - 'variant': ''}} + assert event["submission"] == { + factory.answer_key(2): { + "question": "What color is the open ocean on a sunny day?", + "answer": "blue", + "response_type": "optionresponse", + "input_type": "optioninput", + "correct": True, + "group_label": "", + "variant": "", + }, + factory.answer_key(3): { + "question": "Which piece of furniture is built for sitting?", + "answer": "a table", + "response_type": "multiplechoiceresponse", + "input_type": "choicegroup", + "correct": False, + "group_label": "", + "variant": "", + }, + factory.answer_key(4): { + "question": "Which of the following are musical instruments?", + "answer": ["a piano", "a tree"], + "response_type": "choiceresponse", + "input_type": "checkboxgroup", + "correct": False, + "group_label": "", + "variant": "", + }, + } def capa_factory_for_problem_xml(self, xml): # lint-amnesty, pylint: disable=missing-function-docstring class CustomCapaFactory(CapaFactory): """ A factory for creating a Capa problem with arbitrary xml. 
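
`capa_factory_for_problem_xml` works by minting a throwaway subclass whose class attribute carries the dedented XML; the pattern in isolation (`BaseFactory` and `factory_for` are hypothetical names):

    import textwrap

    class BaseFactory:
        sample_problem_xml = ""

    def factory_for(xml: str) -> type:
        # Each call produces a fresh subclass bound to its own XML.
        class CustomFactory(BaseFactory):
            sample_problem_xml = textwrap.dedent(xml)

        return CustomFactory

    assert factory_for("<problem/>").sample_problem_xml == "<problem/>"
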
""" + sample_problem_xml = textwrap.dedent(xml) return CustomCapaFactory - def get_event_for_answers(self, block, answer_input_dict): # lint-amnesty, pylint: disable=missing-function-docstring - with patch.object(block.runtime, 'publish') as mock_publish: + def get_event_for_answers( + self, block, answer_input_dict + ): # lint-amnesty, pylint: disable=missing-function-docstring + with patch.object(block.runtime, "publish") as mock_publish: block.submit_problem(answer_input_dict) assert len(mock_publish.mock_calls) >= 2 @@ -3734,24 +3791,27 @@ def test_numerical_textline(self): factory = CapaFactory block = factory.create() - answer_input_dict = { - factory.input_key(2): '3.14' - } + answer_input_dict = {factory.input_key(2): "3.14"} event = self.get_event_for_answers(block, answer_input_dict) - assert event['submission'] ==\ - {factory.answer_key(2): {'question': '', 'answer': '3.14', - 'response_type': 'numericalresponse', - 'input_type': 'textline', - 'correct': True, - 'group_label': '', - 'variant': ''}} + assert event["submission"] == { + factory.answer_key(2): { + "question": "", + "answer": "3.14", + "response_type": "numericalresponse", + "input_type": "textline", + "correct": True, + "group_label": "", + "variant": "", + } + } def test_multiple_inputs(self): - group_label = 'Choose the correct color' - input1_label = 'What color is the sky?' - input2_label = 'What color are pine needles?' - factory = self.capa_factory_for_problem_xml("""\ + group_label = "Choose the correct color" + input1_label = "What color is the sky?" + input2_label = "What color are pine needles?" + factory = self.capa_factory_for_problem_xml( + """\ @@ -3759,34 +3819,45 @@ def test_multiple_inputs(self): - """.format(group_label, input1_label, input2_label)) + """.format( + group_label, input1_label, input2_label + ) + ) block = factory.create() answer_input_dict = { - factory.input_key(2, 1): 'blue', - factory.input_key(2, 2): 'yellow', + factory.input_key(2, 1): "blue", + factory.input_key(2, 2): "yellow", } event = self.get_event_for_answers(block, answer_input_dict) - assert event['submission'] ==\ - {factory.answer_key(2, 1): {'group_label': group_label, - 'question': input1_label, - 'answer': 'blue', - 'response_type': 'optionresponse', - 'input_type': 'optioninput', - 'correct': True, 'variant': ''}, - factory.answer_key(2, 2): {'group_label': group_label, - 'question': input2_label, - 'answer': 'yellow', - 'response_type': 'optionresponse', - 'input_type': 'optioninput', - 'correct': False, 'variant': ''}} + assert event["submission"] == { + factory.answer_key(2, 1): { + "group_label": group_label, + "question": input1_label, + "answer": "blue", + "response_type": "optionresponse", + "input_type": "optioninput", + "correct": True, + "variant": "", + }, + factory.answer_key(2, 2): { + "group_label": group_label, + "question": input2_label, + "answer": "yellow", + "response_type": "optionresponse", + "input_type": "optioninput", + "correct": False, + "variant": "", + }, + } def test_optioninput_extended_xml(self): """Test the new XML form of writing with

                    What is 1+4?

                    @@ -3917,14 +4005,15 @@ def test_get_answer_with_jump_to_id_urls(self):
                  • - """) + """ + ) data = {} - problem = CapaFactory.create(showanswer='always', xml=problem_xml) - problem.runtime.service(problem, 'replace_urls').replace_urls = Mock() + problem = CapaFactory.create(showanswer="always", xml=problem_xml) + problem.runtime.service(problem, "replace_urls").replace_urls = Mock() problem.get_answer(data) - assert problem.runtime.service(problem, 'replace_urls').replace_urls.called + assert problem.runtime.service(problem, "replace_urls").replace_urls.called class ProblemBlockReportGenerationTest(unittest.TestCase): @@ -3934,12 +4023,11 @@ class ProblemBlockReportGenerationTest(unittest.TestCase): def setUp(self): # lint-amnesty, pylint: disable=super-method-not-called self.find_question_label_patcher = patch( - 'xmodule.capa.capa_problem.LoncapaProblem.find_question_label', - lambda self, answer_id: answer_id + "xmodule.capa.capa_problem.LoncapaProblem.find_question_label", lambda self, answer_id: answer_id ) self.find_answer_text_patcher = patch( - 'xmodule.capa.capa_problem.LoncapaProblem.find_answer_text', - lambda self, answer_id, current_answer: current_answer + "xmodule.capa.capa_problem.LoncapaProblem.find_answer_text", + lambda self, answer_id, current_answer: current_answer, ) self.find_question_label_patcher.start() self.find_answer_text_patcher.start() @@ -3948,18 +4036,17 @@ def setUp(self): # lint-amnesty, pylint: disable=super-method-not-called def _mock_user_state_generator(self, user_count=1, response_count=10): for uid in range(user_count): - yield self._user_state(username=f'user{uid}', response_count=response_count) + yield self._user_state(username=f"user{uid}", response_count=response_count) - def _user_state(self, username='testuser', response_count=10, suffix=''): + def _user_state(self, username="testuser", response_count=10, suffix=""): return XBlockUserState( username=username, state={ - 'student_answers': { - f'{username}_answerid_{aid}{suffix}': f'{username}_answer_{aid}' - for aid in range(response_count) + "student_answers": { + f"{username}_answerid_{aid}{suffix}": f"{username}_answer_{aid}" for aid in range(response_count) }, - 'seed': 1, - 'correct_map': {}, + "seed": 1, + "correct_map": {}, }, block_key=None, updated=None, @@ -3967,14 +4054,14 @@ def _user_state(self, username='testuser', response_count=10, suffix=''): ) def _get_block(self): # lint-amnesty, pylint: disable=missing-function-docstring - scope_ids = Mock(block_type='problem') + scope_ids = Mock(block_type="problem") block = ProblemBlock(get_test_system(), scope_ids=scope_ids) block.runtime = Mock() - block.data = '' + block.data = "" return block def test_generate_report_data_not_implemented(self): - scope_ids = Mock(block_type='noproblem') + scope_ids = Mock(block_type="noproblem") block = ProblemBlock(get_test_system(), scope_ids=scope_ids) with pytest.raises(NotImplementedError): next(block.generate_report_data(iter([]))) @@ -3988,29 +4075,33 @@ def test_generate_report_data_dont_limit_responses(self): block = self._get_block() user_count = 5 response_count = 10 - report_data = list(block.generate_report_data( - self._mock_user_state_generator( - user_count=user_count, - response_count=response_count, + report_data = list( + block.generate_report_data( + self._mock_user_state_generator( + user_count=user_count, + response_count=response_count, + ) ) - )) + ) assert (user_count * response_count) == len(report_data) def test_generate_report_data_skip_dynamath(self): block = self._get_block() - iterator = 
iter([self._user_state(suffix='_dynamath')]) + iterator = iter([self._user_state(suffix="_dynamath")]) report_data = list(block.generate_report_data(iterator)) assert 0 == len(report_data) def test_generate_report_data_report_loncapa_error(self): - #Test to make sure reports continue despite loncappa errors, and write them into the report. + # Test to make sure reports continue despite loncappa errors, and write them into the report. block = self._get_block() - with patch('xmodule.capa_block.LoncapaProblem') as mock_LoncapaProblem: + with patch("xmodule.capa_block.LoncapaProblem") as mock_LoncapaProblem: mock_LoncapaProblem.side_effect = LoncapaProblemError - report_data = list(block.generate_report_data( - self._mock_user_state_generator( - user_count=1, - response_count=5, + report_data = list( + block.generate_report_data( + self._mock_user_state_generator( + user_count=1, + response_count=5, + ) ) - )) - assert 'Python Error: No Answer Retrieved' in list(report_data[0][1].values()) + ) + assert "Python Error: No Answer Retrieved" in list(report_data[0][1].values()) diff --git a/xmodule/tests/test_fields.py b/xmodule/tests/test_fields.py index 1e928fb1b3d8..95b329bdb00d 100644 --- a/xmodule/tests/test_fields.py +++ b/xmodule/tests/test_fields.py @@ -1,8 +1,8 @@ """Tests for classes defined in fields.py.""" - import datetime import unittest + import pytest from pytz import UTC @@ -13,63 +13,63 @@ class DateTest(unittest.TestCase): # lint-amnesty, pylint: disable=missing-clas date = Date() def compare_dates(self, dt1, dt2, expected_delta): - assert (dt1 - dt2) == expected_delta, ((((str(dt1) + '-') + str(dt2)) + '!=') + str(expected_delta)) + assert (dt1 - dt2) == expected_delta, (((str(dt1) + "-") + str(dt2)) + "!=") + str(expected_delta) def test_from_json(self): """Test conversion from iso compatible date strings to struct_time""" self.compare_dates( - DateTest.date.from_json("2013-01-01"), - DateTest.date.from_json("2012-12-31"), - datetime.timedelta(days=1) + DateTest.date.from_json("2013-01-01"), DateTest.date.from_json("2012-12-31"), datetime.timedelta(days=1) ) self.compare_dates( DateTest.date.from_json("2013-01-01T00"), DateTest.date.from_json("2012-12-31T23"), - datetime.timedelta(hours=1) + datetime.timedelta(hours=1), ) self.compare_dates( DateTest.date.from_json("2013-01-01T00:00"), DateTest.date.from_json("2012-12-31T23:59"), - datetime.timedelta(minutes=1) + datetime.timedelta(minutes=1), ) self.compare_dates( DateTest.date.from_json("2013-01-01T00:00:00"), DateTest.date.from_json("2012-12-31T23:59:59"), - datetime.timedelta(seconds=1) + datetime.timedelta(seconds=1), ) self.compare_dates( DateTest.date.from_json("2013-01-01T00:00:00Z"), DateTest.date.from_json("2012-12-31T23:59:59Z"), - datetime.timedelta(seconds=1) + datetime.timedelta(seconds=1), ) self.compare_dates( DateTest.date.from_json("2012-12-31T23:00:01-01:00"), DateTest.date.from_json("2013-01-01T00:00:00+01:00"), - datetime.timedelta(hours=1, seconds=1) + datetime.timedelta(hours=1, seconds=1), ) def test_enforce_type(self): assert DateTest.date.enforce_type(None) is None - assert DateTest.date.enforce_type('') is None - assert DateTest.date.enforce_type('2012-12-31T23:00:01') ==\ - datetime.datetime(2012, 12, 31, 23, 0, 1, tzinfo=UTC) + assert DateTest.date.enforce_type("") is None + assert DateTest.date.enforce_type("2012-12-31T23:00:01") == datetime.datetime( + 2012, 12, 31, 23, 0, 1, tzinfo=UTC + ) assert DateTest.date.enforce_type(1234567890000) == datetime.datetime(2009, 2, 13, 23, 31, 30, tzinfo=UTC) - 
assert DateTest.date.enforce_type(datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC)) ==\ - datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC) + assert DateTest.date.enforce_type(datetime.datetime(2014, 5, 9, 21, 1, 27, tzinfo=UTC)) == datetime.datetime( + 2014, 5, 9, 21, 1, 27, tzinfo=UTC + ) with pytest.raises(TypeError): DateTest.date.enforce_type([1]) def test_return_None(self): - assert DateTest.date.from_json('') is None + assert DateTest.date.from_json("") is None assert DateTest.date.from_json(None) is None with pytest.raises(TypeError): - DateTest.date.from_json(['unknown value']) + DateTest.date.from_json(["unknown value"]) def test_old_due_date_format(self): current = datetime.datetime.today() - assert datetime.datetime(current.year, 3, 12, 12, tzinfo=UTC) == DateTest.date.from_json('March 12 12:00') - assert datetime.datetime(current.year, 12, 4, 16, 30, tzinfo=UTC) == DateTest.date.from_json('December 4 16:30') - assert DateTest.date.from_json('12 12:00') is None + assert datetime.datetime(current.year, 3, 12, 12, tzinfo=UTC) == DateTest.date.from_json("March 12 12:00") + assert datetime.datetime(current.year, 12, 4, 16, 30, tzinfo=UTC) == DateTest.date.from_json("December 4 16:30") + assert DateTest.date.from_json("12 12:00") is None def test_non_std_from_json(self): """ @@ -85,35 +85,41 @@ def test_to_json(self): """ Test converting time reprs to iso dates """ - assert DateTest.date.to_json(datetime.datetime.strptime('2012-12-31T23:59:59Z', '%Y-%m-%dT%H:%M:%SZ')) ==\ - '2012-12-31T23:59:59Z' - assert DateTest.date.to_json(DateTest.date.from_json('2012-12-31T23:59:59Z')) == '2012-12-31T23:59:59Z' - assert DateTest.date.to_json(DateTest.date.from_json('2012-12-31T23:00:01-01:00')) ==\ - '2012-12-31T23:00:01-01:00' + assert ( + DateTest.date.to_json(datetime.datetime.strptime("2012-12-31T23:59:59Z", "%Y-%m-%dT%H:%M:%SZ")) + == "2012-12-31T23:59:59Z" + ) + assert DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:59:59Z")) == "2012-12-31T23:59:59Z" + assert ( + DateTest.date.to_json(DateTest.date.from_json("2012-12-31T23:00:01-01:00")) == "2012-12-31T23:00:01-01:00" + ) with pytest.raises(TypeError): - DateTest.date.to_json('2012-12-31T23:00:01-01:00') + DateTest.date.to_json("2012-12-31T23:00:01-01:00") class TimedeltaTest(unittest.TestCase): # lint-amnesty, pylint: disable=missing-class-docstring delta = Timedelta() def test_from_json(self): - assert TimedeltaTest.delta.from_json('1 day 12 hours 59 minutes 59 seconds') ==\ - datetime.timedelta(days=1, hours=12, minutes=59, seconds=59) + assert TimedeltaTest.delta.from_json("1 day 12 hours 59 minutes 59 seconds") == datetime.timedelta( + days=1, hours=12, minutes=59, seconds=59 + ) - assert TimedeltaTest.delta.from_json('1 day 46799 seconds') == datetime.timedelta(days=1, seconds=46799) + assert TimedeltaTest.delta.from_json("1 day 46799 seconds") == datetime.timedelta(days=1, seconds=46799) def test_enforce_type(self): assert TimedeltaTest.delta.enforce_type(None) is None - assert TimedeltaTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)) ==\ - datetime.timedelta(days=1, seconds=46799) - assert TimedeltaTest.delta.enforce_type('1 day 46799 seconds') == datetime.timedelta(days=1, seconds=46799) + assert TimedeltaTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)) == datetime.timedelta( + days=1, seconds=46799 + ) + assert TimedeltaTest.delta.enforce_type("1 day 46799 seconds") == datetime.timedelta(days=1, seconds=46799) with pytest.raises(TypeError): 
TimedeltaTest.delta.enforce_type([1]) def test_to_json(self): - assert '1 days 46799 seconds' ==\ - TimedeltaTest.delta.to_json(datetime.timedelta(days=1, hours=12, minutes=59, seconds=59)) + assert "1 days 46799 seconds" == TimedeltaTest.delta.to_json( + datetime.timedelta(days=1, hours=12, minutes=59, seconds=59) + ) class RelativeTimeTest(unittest.TestCase): # lint-amnesty, pylint: disable=missing-class-docstring @@ -121,7 +127,7 @@ class RelativeTimeTest(unittest.TestCase): # lint-amnesty, pylint: disable=miss delta = RelativeTime() def test_from_json(self): - assert RelativeTimeTest.delta.from_json('0:05:07') == datetime.timedelta(seconds=307) + assert RelativeTimeTest.delta.from_json("0:05:07") == datetime.timedelta(seconds=307) assert RelativeTimeTest.delta.from_json(100.0) == datetime.timedelta(seconds=100) assert RelativeTimeTest.delta.from_json(None) == datetime.timedelta(seconds=0) @@ -134,16 +140,17 @@ def test_from_json(self): def test_enforce_type(self): assert RelativeTimeTest.delta.enforce_type(None) is None - assert RelativeTimeTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)) ==\ - datetime.timedelta(days=1, seconds=46799) - assert RelativeTimeTest.delta.enforce_type('0:05:07') == datetime.timedelta(seconds=307) + assert RelativeTimeTest.delta.enforce_type(datetime.timedelta(days=1, seconds=46799)) == datetime.timedelta( + days=1, seconds=46799 + ) + assert RelativeTimeTest.delta.enforce_type("0:05:07") == datetime.timedelta(seconds=307) with pytest.raises(TypeError): RelativeTimeTest.delta.enforce_type([1]) def test_to_json(self): - assert '01:02:03' == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723)) - assert '00:00:00' == RelativeTimeTest.delta.to_json(None) - assert '00:01:40' == RelativeTimeTest.delta.to_json(100.0) + assert "01:02:03" == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723)) + assert "00:00:00" == RelativeTimeTest.delta.to_json(None) + assert "00:01:40" == RelativeTimeTest.delta.to_json(100.0) error_msg = "RelativeTime max value is 23:59:59=86400.0 seconds, but 90000.0 seconds is passed" with self.assertRaisesRegex(ValueError, error_msg): @@ -153,5 +160,5 @@ def test_to_json(self): RelativeTimeTest.delta.to_json("123") def test_str(self): - assert '01:02:03' == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723)) - assert '11:02:03' == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=39723)) + assert "01:02:03" == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=3723)) + assert "11:02:03" == RelativeTimeTest.delta.to_json(datetime.timedelta(seconds=39723)) diff --git a/xmodule/tests/test_progress.py b/xmodule/tests/test_progress.py index 17cdc2b19a60..c2ae97ced753 100644 --- a/xmodule/tests/test_progress.py +++ b/xmodule/tests/test_progress.py @@ -1,15 +1,15 @@ """Module progress tests""" - import unittest from xmodule.progress import Progress class ProgressTest(unittest.TestCase): - ''' Test that basic Progress objects work. A Progress represents a + """Test that basic Progress objects work. A Progress represents a fraction between 0 and 1. 
- ''' + """ + not_started = Progress(0, 17) part_done = Progress(2, 6) half_done = Progress(3, 6) @@ -36,7 +36,7 @@ def test_create_object(self): def test_clamp(self): assert (2, 2) == Progress(3, 2).frac() - assert (0, 2) == Progress((- 2), 2).frac() + assert (0, 2) == Progress((-2), 2).frac() def test_frac(self): prg = Progress(1, 2) @@ -73,15 +73,15 @@ def test_done(self): assert not self.not_started.done() def test_str(self): - assert str(self.not_started) == '0/17' - assert str(self.part_done) == '2/6' - assert str(self.done) == '7/7' - assert str(Progress(2.1234, 7)) == '2.12/7' - assert str(Progress(2.0034, 7)) == '2/7' - assert str(Progress(0.999, 7)) == '1/7' + assert str(self.not_started) == "0/17" + assert str(self.part_done) == "2/6" + assert str(self.done) == "7/7" + assert str(Progress(2.1234, 7)) == "2.12/7" + assert str(Progress(2.0034, 7)) == "2/7" + assert str(Progress(0.999, 7)) == "1/7" def test_add(self): - '''Test the Progress.add_counts() method''' + """Test the Progress.add_counts() method""" prg1 = Progress(0, 2) prg2 = Progress(1, 3) prg3 = Progress(2, 5) @@ -96,8 +96,8 @@ def test_add(self): assert add(prg_none, prg2) == prg2.frac() def test_equality(self): - '''Test that comparing Progress objects for equality - works correctly.''' + """Test that comparing Progress objects for equality + works correctly.""" prg1 = Progress(1, 2) prg2 = Progress(2, 4) prg3 = Progress(1, 2) diff --git a/xmodule/tests/test_stringify.py b/xmodule/tests/test_stringify.py index a70453ce2b01..d7568f7b16d2 100644 --- a/xmodule/tests/test_stringify.py +++ b/xmodule/tests/test_stringify.py @@ -2,7 +2,6 @@ Tests stringify functions used in xmodule html """ - from lxml import etree from xmodule.stringify import stringify_children @@ -10,7 +9,7 @@ def test_stringify(): text = 'Hi
                    there Bruce!
                    ' - html = f'''{text}''' + html = f"""{text}""" xml = etree.fromstring(html) out = stringify_children(xml) assert out == text diff --git a/xmodule/tests/test_xml_block.py b/xmodule/tests/test_xml_block.py index 7f33aad0f998..3c25cebee58f 100644 --- a/xmodule/tests/test_xml_block.py +++ b/xmodule/tests/test_xml_block.py @@ -4,8 +4,8 @@ import unittest from unittest.mock import Mock -import dateutil.parser +import dateutil.parser from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator from xblock.field_data import DictFieldData from xblock.fields import Any, Boolean, Dict, Float, Integer, List, Scope, String @@ -30,30 +30,26 @@ def to_json(self, value): class TestFields: # Will be returned by editable_metadata_fields. - max_attempts = Integer(scope=Scope.settings, default=1000, values={'min': 1, 'max': 10}) + max_attempts = Integer(scope=Scope.settings, default=1000, values={"min": 1, "max": 10}) # Will not be returned by editable_metadata_fields because filtered out by non_editable_metadata_fields. due = Date(scope=Scope.settings) # Will not be returned by editable_metadata_fields because is not Scope.settings. student_answers = Dict(scope=Scope.user_state) # Will be returned, and can override the inherited value from XModule. display_name = String( - scope=Scope.settings, - default='local default', - display_name='Local Display Name', - help='local help' + scope=Scope.settings, default="local default", display_name="Local Display Name", help="local help" ) # Used for testing select type, effect of to_json method string_select = CrazyJsonString( scope=Scope.settings, - default='default value', - values=[{'display_name': 'first', 'value': 'value a'}, - {'display_name': 'second', 'value': 'value b'}] + default="default value", + values=[{"display_name": "first", "value": "value a"}, {"display_name": "second", "value": "value b"}], ) showanswer = InheritanceMixin.showanswer # Used for testing select type - float_select = Float(scope=Scope.settings, default=.999, values=[1.23, 0.98]) + float_select = Float(scope=Scope.settings, default=0.999, values=[1.23, 0.98]) # Used for testing float type - float_non_select = Float(scope=Scope.settings, default=.999, values={'min': 0, 'step': .3}) + float_non_select = Float(scope=Scope.settings, default=0.999, values={"min": 0, "step": 0.3}) # Used for testing that Booleans get mapped to select type boolean_select = Boolean(scope=Scope.settings) # Used for testing Lists @@ -69,17 +65,18 @@ class TestableInheritingXBlock(XmlMixin): # lint-amnesty, pylint: disable=abstr """ An XBlock we can use in these tests. """ + inherited = String(scope=Scope.settings, default="the default") not_inherited = String(scope=Scope.settings, default="nothing") def setUp(self): super().setUp() - self.dummy_course_key = CourseLocator('test_org', 'test_123', 'test_run') + self.dummy_course_key = CourseLocator("test_org", "test_123", "test_run") self.system = get_test_descriptor_system() self.all_blocks = {} self.system.get_block = self.all_blocks.get self.field_data = InheritingFieldData( - inheritable_names=['inherited'], + inheritable_names=["inherited"], kvs=DictKeyValueStore({}), ) @@ -87,19 +84,12 @@ def get_block_using_split_kvs(self, block_type, block_id, fields, defaults): """ Construct an Xblock with split mongo kvs. 
""" - kvs = SplitMongoKVS( - definition=Mock(), - initial_values=fields, - default_values=defaults, - parent=None - ) + kvs = SplitMongoKVS(definition=Mock(), initial_values=fields, default_values=defaults, parent=None) self.field_data = InheritingFieldData( - inheritable_names=['inherited'], + inheritable_names=["inherited"], kvs=kvs, ) - block = self.get_a_block( - usage_id=self.get_usage_id(block_type, block_id) - ) + block = self.get_a_block(usage_id=self.get_usage_id(block_type, block_id)) return block @@ -131,8 +121,8 @@ def test_default_value(self): Test that the Blocks with nothing set with return the fields' defaults. """ block = self.get_a_block() - assert block.inherited == 'the default' - assert block.not_inherited == 'nothing' + assert block.inherited == "the default" + assert block.not_inherited == "nothing" def test_set_value(self): """ @@ -141,8 +131,8 @@ def test_set_value(self): block = self.get_a_block() block.inherited = "Changed!" block.not_inherited = "New Value!" - assert block.inherited == 'Changed!' - assert block.not_inherited == 'New Value!' + assert block.inherited == "Changed!" + assert block.not_inherited == "New Value!" def test_inherited(self): """ @@ -150,11 +140,11 @@ def test_inherited(self): """ parent_block = self.get_a_block(usage_id=self.get_usage_id("course", "parent")) parent_block.inherited = "Changed!" - assert parent_block.inherited == 'Changed!' + assert parent_block.inherited == "Changed!" child = self.get_a_block(usage_id=self.get_usage_id("vertical", "child")) child.parent = parent_block.location - assert child.inherited == 'Changed!' + assert child.inherited == "Changed!" def test_inherited_across_generations(self): """ @@ -162,12 +152,12 @@ def test_inherited_across_generations(self): """ parent = self.get_a_block(usage_id=self.get_usage_id("course", "parent")) parent.inherited = "Changed!" - assert parent.inherited == 'Changed!' + assert parent.inherited == "Changed!" for child_num in range(10): usage_id = self.get_usage_id("vertical", f"child_{child_num}") child = self.get_a_block(usage_id=usage_id) child.parent = parent.location - assert child.inherited == 'Changed!' + assert child.inherited == "Changed!" def test_not_inherited(self): """ @@ -175,11 +165,11 @@ def test_not_inherited(self): """ parent = self.get_a_block(usage_id=self.get_usage_id("course", "parent")) parent.not_inherited = "Changed!" - assert parent.not_inherited == 'Changed!' + assert parent.not_inherited == "Changed!" child = self.get_a_block(usage_id=self.get_usage_id("vertical", "child")) child.parent = parent.location - assert child.not_inherited == 'nothing' + assert child.not_inherited == "nothing" def test_non_defaults_inherited_across_lib(self): """ @@ -192,7 +182,7 @@ def test_non_defaults_inherited_across_lib(self): fields=dict(inherited="changed!"), defaults=dict(inherited="parent's default"), ) - assert parent_block.inherited == 'changed!' + assert parent_block.inherited == "changed!" child = self.get_block_using_split_kvs( block_type="problem", @@ -201,7 +191,7 @@ def test_non_defaults_inherited_across_lib(self): defaults={}, ) child.parent = parent_block.location - assert child.inherited == 'changed!' + assert child.inherited == "changed!" def test_defaults_not_inherited_across_lib(self): """ @@ -214,7 +204,7 @@ def test_defaults_not_inherited_across_lib(self): fields=dict(inherited="changed!"), defaults=dict(inherited="parent's default"), ) - assert parent_block.inherited == 'changed!' + assert parent_block.inherited == "changed!" 
child = self.get_block_using_split_kvs( block_type="library_content", @@ -238,60 +228,89 @@ def test_display_name_field(self): # Also tests that xml_attributes is filtered out of XmlMixin. assert 1 == len(editable_fields), editable_fields self.assert_field_values( - editable_fields, 'display_name', XModuleMixin.display_name, - explicitly_set=False, value=None, default_value=None + editable_fields, + "display_name", + XModuleMixin.display_name, + explicitly_set=False, + value=None, + default_value=None, ) def test_override_default(self): # Tests that explicitly_set is correct when a value overrides the default (not inheritable). - editable_fields = self.get_xml_editable_fields(DictFieldData({'display_name': 'foo'})) + editable_fields = self.get_xml_editable_fields(DictFieldData({"display_name": "foo"})) self.assert_field_values( - editable_fields, 'display_name', XModuleMixin.display_name, - explicitly_set=True, value='foo', default_value=None + editable_fields, + "display_name", + XModuleMixin.display_name, + explicitly_set=True, + value="foo", + default_value=None, ) def test_integer_field(self): - block = self.get_block(DictFieldData({'max_attempts': '7'})) + block = self.get_block(DictFieldData({"max_attempts": "7"})) editable_fields = block.editable_metadata_fields assert 8 == len(editable_fields) self.assert_field_values( - editable_fields, 'max_attempts', TestFields.max_attempts, - explicitly_set=True, value=7, default_value=1000, type='Integer', - options=TestFields.max_attempts.values + editable_fields, + "max_attempts", + TestFields.max_attempts, + explicitly_set=True, + value=7, + default_value=1000, + type="Integer", + options=TestFields.max_attempts.values, ) self.assert_field_values( - editable_fields, 'display_name', TestFields.display_name, - explicitly_set=False, value='local default', default_value='local default' + editable_fields, + "display_name", + TestFields.display_name, + explicitly_set=False, + value="local default", + default_value="local default", ) editable_fields = self.get_block(DictFieldData({})).editable_metadata_fields self.assert_field_values( - editable_fields, 'max_attempts', TestFields.max_attempts, - explicitly_set=False, value=1000, default_value=1000, type='Integer', - options=TestFields.max_attempts.values + editable_fields, + "max_attempts", + TestFields.max_attempts, + explicitly_set=False, + value=1000, + default_value=1000, + type="Integer", + options=TestFields.max_attempts.values, ) def test_inherited_field(self): - kvs = InheritanceKeyValueStore(initial_values={}, inherited_settings={'showanswer': 'inherited'}) + kvs = InheritanceKeyValueStore(initial_values={}, inherited_settings={"showanswer": "inherited"}) model_data = KvsFieldData(kvs) block = self.get_block(model_data) editable_fields = block.editable_metadata_fields self.assert_field_values( - editable_fields, 'showanswer', InheritanceMixin.showanswer, - explicitly_set=False, value='inherited', default_value='inherited' + editable_fields, + "showanswer", + InheritanceMixin.showanswer, + explicitly_set=False, + value="inherited", + default_value="inherited", ) # Mimic the case where display_name WOULD have been inherited, except we explicitly set it. 
kvs = InheritanceKeyValueStore( - initial_values={'showanswer': 'explicit'}, - inherited_settings={'showanswer': 'inheritable value'} + initial_values={"showanswer": "explicit"}, inherited_settings={"showanswer": "inheritable value"} ) model_data = KvsFieldData(kvs) block = self.get_block(model_data) editable_fields = block.editable_metadata_fields self.assert_field_values( - editable_fields, 'showanswer', InheritanceMixin.showanswer, - explicitly_set=True, value='explicit', default_value='inheritable value' + editable_fields, + "showanswer", + InheritanceMixin.showanswer, + explicitly_set=True, + value="explicit", + default_value="inheritable value", ) def test_type_and_options(self): @@ -303,35 +322,61 @@ def test_type_and_options(self): # Tests for select self.assert_field_values( - editable_fields, 'string_select', TestFields.string_select, - explicitly_set=False, value='default value', default_value='default value', - type='Select', options=[{'display_name': 'first', 'value': 'value a JSON'}, - {'display_name': 'second', 'value': 'value b JSON'}] + editable_fields, + "string_select", + TestFields.string_select, + explicitly_set=False, + value="default value", + default_value="default value", + type="Select", + options=[ + {"display_name": "first", "value": "value a JSON"}, + {"display_name": "second", "value": "value b JSON"}, + ], ) self.assert_field_values( - editable_fields, 'float_select', TestFields.float_select, - explicitly_set=False, value=.999, default_value=.999, - type='Select', options=[1.23, 0.98] + editable_fields, + "float_select", + TestFields.float_select, + explicitly_set=False, + value=0.999, + default_value=0.999, + type="Select", + options=[1.23, 0.98], ) self.assert_field_values( - editable_fields, 'boolean_select', TestFields.boolean_select, - explicitly_set=False, value=None, default_value=None, - type='Select', options=[{'display_name': "True", "value": True}, {'display_name': "False", "value": False}] + editable_fields, + "boolean_select", + TestFields.boolean_select, + explicitly_set=False, + value=None, + default_value=None, + type="Select", + options=[{"display_name": "True", "value": True}, {"display_name": "False", "value": False}], ) # Test for float self.assert_field_values( - editable_fields, 'float_non_select', TestFields.float_non_select, - explicitly_set=False, value=.999, default_value=.999, - type='Float', options={'min': 0, 'step': .3} + editable_fields, + "float_non_select", + TestFields.float_non_select, + explicitly_set=False, + value=0.999, + default_value=0.999, + type="Float", + options={"min": 0, "step": 0.3}, ) self.assert_field_values( - editable_fields, 'list_field', TestFields.list_field, - explicitly_set=False, value=[], default_value=[], - type='List' + editable_fields, + "list_field", + TestFields.list_field, + explicitly_set=False, + value=[], + default_value=[], + type="List", ) # Start of helper methods @@ -354,43 +399,56 @@ def non_editable_metadata_fields(self): system = get_test_descriptor_system(render_template=Mock()) return system.construct_xblock_from_class(TestModuleBlock, field_data=field_data, scope_ids=Mock()) - def assert_field_values(self, editable_fields, name, field, explicitly_set, value, default_value, # lint-amnesty, pylint: disable=dangerous-default-value - type='Generic', options=[]): # lint-amnesty, pylint: disable=redefined-builtin + def assert_field_values( # lint-amnesty, pylint: disable=dangerous-default-value + self, + editable_fields, + name, + field, + explicitly_set, + value, + default_value, + 
type="Generic", + options=[], + ): # lint-amnesty, pylint: disable=redefined-builtin test_field = editable_fields[name] - assert field.name == test_field['field_name'] - assert field.display_name == test_field['display_name'] - assert field.help == test_field['help'] + assert field.name == test_field["field_name"] + assert field.display_name == test_field["display_name"] + assert field.help == test_field["help"] - assert field.to_json(value) == test_field['value'] - assert field.to_json(default_value) == test_field['default_value'] + assert field.to_json(value) == test_field["value"] + assert field.to_json(default_value) == test_field["default_value"] - assert options == test_field['options'] - assert type == test_field['type'] + assert options == test_field["options"] + assert type == test_field["type"] - assert explicitly_set == test_field['explicitly_set'] + assert explicitly_set == test_field["explicitly_set"] class TestSerialize(unittest.TestCase): - """ Tests the serialize, method, which is not dependent on type. """ + """Tests the serialize, method, which is not dependent on type.""" def test_serialize(self): - assert serialize_field(None) == 'null' - assert serialize_field(-2) == '-2' - assert serialize_field('2') == '2' - assert serialize_field(-3.41) == '-3.41' - assert serialize_field('2.589') == '2.589' - assert serialize_field(False) == 'false' - assert serialize_field('false') == 'false' - assert serialize_field('fAlse') == 'fAlse' - assert serialize_field('hat box') == 'hat box' - serialized_dict = serialize_field({'bar': 'hat', 'frog': 'green'}) - assert serialized_dict == '{"bar": "hat", "frog": "green"}' or serialized_dict == '{"frog": "green", "bar": "hat"}' # lint-amnesty, pylint: disable=consider-using-in, line-too-long - assert serialize_field([3.5, 5.6]) == '[3.5, 5.6]' - assert serialize_field(['foo', 'bar']) == '["foo", "bar"]' - assert serialize_field("2012-12-31T23:59:59Z") == '2012-12-31T23:59:59Z' - assert serialize_field("1 day 12 hours 59 minutes 59 seconds") == '1 day 12 hours 59 minutes 59 seconds' - assert serialize_field(dateutil.parser.parse('2012-12-31T23:59:59Z')) == '2012-12-31T23:59:59+00:00' + assert serialize_field(None) == "null" + assert serialize_field(-2) == "-2" + assert serialize_field("2") == "2" + assert serialize_field(-3.41) == "-3.41" + assert serialize_field("2.589") == "2.589" + assert serialize_field(False) == "false" + assert serialize_field("false") == "false" + assert serialize_field("fAlse") == "fAlse" + assert serialize_field("hat box") == "hat box" + serialized_dict = serialize_field({"bar": "hat", "frog": "green"}) + assert ( + serialized_dict # lint-amnesty, pylint: disable=consider-using-in, line-too-long + == '{"bar": "hat", "frog": "green"}' + or serialized_dict == '{"frog": "green", "bar": "hat"}' + ) + assert serialize_field([3.5, 5.6]) == "[3.5, 5.6]" + assert serialize_field(["foo", "bar"]) == '["foo", "bar"]' + assert serialize_field("2012-12-31T23:59:59Z") == "2012-12-31T23:59:59Z" + assert serialize_field("1 day 12 hours 59 minutes 59 seconds") == "1 day 12 hours 59 minutes 59 seconds" + assert serialize_field(dateutil.parser.parse("2012-12-31T23:59:59Z")) == "2012-12-31T23:59:59+00:00" class TestDeserialize(unittest.TestCase): @@ -412,27 +470,27 @@ def assertDeserializeNonString(self): self.assertDeserializeEqual([10], [10]) self.assertDeserializeEqual({}, {}) self.assertDeserializeEqual([], []) - self.assertDeserializeEqual(None, 'null') + self.assertDeserializeEqual(None, "null") class 
TestDeserializeInteger(TestDeserialize): - """ Tests deserialize as related to Integer type. """ + """Tests deserialize as related to Integer type.""" field_type = Integer def test_deserialize(self): - self.assertDeserializeEqual(-2, '-2') + self.assertDeserializeEqual(-2, "-2") self.assertDeserializeEqual("450", '"450"') # False can be parsed as a int (converts to 0) - self.assertDeserializeEqual(False, 'false') + self.assertDeserializeEqual(False, "false") # True can be parsed as a int (converts to 1) - self.assertDeserializeEqual(True, 'true') + self.assertDeserializeEqual(True, "true") # 2.78 can be converted to int, so the string will be deserialized - self.assertDeserializeEqual(-2.78, '-2.78') + self.assertDeserializeEqual(-2.78, "-2.78") def test_deserialize_unsupported_types(self): - self.assertDeserializeEqual('[3]', '[3]') + self.assertDeserializeEqual("[3]", "[3]") # '2.78' cannot be converted to int, so input value is returned self.assertDeserializeEqual('"-2.78"', '"-2.78"') # 'false' cannot be converted to int, so input value is returned @@ -441,134 +499,128 @@ def test_deserialize_unsupported_types(self): class TestDeserializeFloat(TestDeserialize): - """ Tests deserialize as related to Float type. """ + """Tests deserialize as related to Float type.""" field_type = Float def test_deserialize(self): - self.assertDeserializeEqual(-2, '-2') + self.assertDeserializeEqual(-2, "-2") self.assertDeserializeEqual("450", '"450"') - self.assertDeserializeEqual(-2.78, '-2.78') + self.assertDeserializeEqual(-2.78, "-2.78") self.assertDeserializeEqual("0.45", '"0.45"') # False can be parsed as a float (converts to 0) - self.assertDeserializeEqual(False, 'false') + self.assertDeserializeEqual(False, "false") # True can be parsed as a float (converts to 1) - self.assertDeserializeEqual(True, 'true') + self.assertDeserializeEqual(True, "true") def test_deserialize_unsupported_types(self): - self.assertDeserializeEqual('[3]', '[3]') + self.assertDeserializeEqual("[3]", "[3]") # 'false' cannot be converted to float, so input value is returned self.assertDeserializeEqual('"false"', '"false"') self.assertDeserializeNonString() class TestDeserializeBoolean(TestDeserialize): - """ Tests deserialize as related to Boolean type. """ + """Tests deserialize as related to Boolean type.""" field_type = Boolean def test_deserialize(self): # json.loads converts the value to Python bool - self.assertDeserializeEqual(False, 'false') - self.assertDeserializeEqual(True, 'true') + self.assertDeserializeEqual(False, "false") + self.assertDeserializeEqual(True, "true") # json.loads fails, string value is returned. - self.assertDeserializeEqual('False', 'False') - self.assertDeserializeEqual('True', 'True') + self.assertDeserializeEqual("False", "False") + self.assertDeserializeEqual("True", "True") # json.loads deserializes as a string - self.assertDeserializeEqual('false', '"false"') - self.assertDeserializeEqual('fAlse', '"fAlse"') + self.assertDeserializeEqual("false", '"false"') + self.assertDeserializeEqual("fAlse", '"fAlse"') self.assertDeserializeEqual("TruE", '"TruE"') # 2.78 can be converted to a bool, so the string will be deserialized - self.assertDeserializeEqual(-2.78, '-2.78') + self.assertDeserializeEqual(-2.78, "-2.78") self.assertDeserializeNonString() class TestDeserializeString(TestDeserialize): - """ Tests deserialize as related to String type. 
""" + """Tests deserialize as related to String type.""" field_type = String def test_deserialize(self): - self.assertDeserializeEqual('hAlf', '"hAlf"') - self.assertDeserializeEqual('false', '"false"') - self.assertDeserializeEqual('single quote', 'single quote') + self.assertDeserializeEqual("hAlf", '"hAlf"') + self.assertDeserializeEqual("false", '"false"') + self.assertDeserializeEqual("single quote", "single quote") def test_deserialize_unsupported_types(self): - self.assertDeserializeEqual('3.4', '3.4') - self.assertDeserializeEqual('false', 'false') - self.assertDeserializeEqual('2', '2') - self.assertDeserializeEqual('[3]', '[3]') + self.assertDeserializeEqual("3.4", "3.4") + self.assertDeserializeEqual("false", "false") + self.assertDeserializeEqual("2", "2") + self.assertDeserializeEqual("[3]", "[3]") self.assertDeserializeNonString() class TestDeserializeAny(TestDeserialize): - """ Tests deserialize as related to Any type. """ + """Tests deserialize as related to Any type.""" field_type = Any def test_deserialize(self): - self.assertDeserializeEqual('hAlf', '"hAlf"') - self.assertDeserializeEqual('false', '"false"') - self.assertDeserializeEqual({'bar': 'hat', 'frog': 'green'}, '{"bar": "hat", "frog": "green"}') - self.assertDeserializeEqual([3.5, 5.6], '[3.5, 5.6]') - self.assertDeserializeEqual('[', '[') - self.assertDeserializeEqual(False, 'false') - self.assertDeserializeEqual(3.4, '3.4') + self.assertDeserializeEqual("hAlf", '"hAlf"') + self.assertDeserializeEqual("false", '"false"') + self.assertDeserializeEqual({"bar": "hat", "frog": "green"}, '{"bar": "hat", "frog": "green"}') + self.assertDeserializeEqual([3.5, 5.6], "[3.5, 5.6]") + self.assertDeserializeEqual("[", "[") + self.assertDeserializeEqual(False, "false") + self.assertDeserializeEqual(3.4, "3.4") self.assertDeserializeNonString() class TestDeserializeList(TestDeserialize): - """ Tests deserialize as related to List type. """ + """Tests deserialize as related to List type.""" field_type = List def test_deserialize(self): - self.assertDeserializeEqual(['foo', 'bar'], '["foo", "bar"]') - self.assertDeserializeEqual([3.5, 5.6], '[3.5, 5.6]') - self.assertDeserializeEqual([], '[]') + self.assertDeserializeEqual(["foo", "bar"], '["foo", "bar"]') + self.assertDeserializeEqual([3.5, 5.6], "[3.5, 5.6]") + self.assertDeserializeEqual([], "[]") def test_deserialize_unsupported_types(self): - self.assertDeserializeEqual('3.4', '3.4') - self.assertDeserializeEqual('false', 'false') - self.assertDeserializeEqual('2', '2') + self.assertDeserializeEqual("3.4", "3.4") + self.assertDeserializeEqual("false", "false") + self.assertDeserializeEqual("2", "2") self.assertDeserializeNonString() class TestDeserializeDate(TestDeserialize): - """ Tests deserialize as related to Date type. """ + """Tests deserialize as related to Date type.""" field_type = Date def test_deserialize(self): - self.assertDeserializeEqual('2012-12-31T23:59:59Z', "2012-12-31T23:59:59Z") - self.assertDeserializeEqual('2012-12-31T23:59:59Z', '"2012-12-31T23:59:59Z"') + self.assertDeserializeEqual("2012-12-31T23:59:59Z", "2012-12-31T23:59:59Z") + self.assertDeserializeEqual("2012-12-31T23:59:59Z", '"2012-12-31T23:59:59Z"') self.assertDeserializeNonString() class TestDeserializeTimedelta(TestDeserialize): - """ Tests deserialize as related to Timedelta type. 
""" + """Tests deserialize as related to Timedelta type.""" field_type = Timedelta def test_deserialize(self): - self.assertDeserializeEqual( - '1 day 12 hours 59 minutes 59 seconds', - '1 day 12 hours 59 minutes 59 seconds' - ) - self.assertDeserializeEqual( - '1 day 12 hours 59 minutes 59 seconds', - '"1 day 12 hours 59 minutes 59 seconds"' - ) + self.assertDeserializeEqual("1 day 12 hours 59 minutes 59 seconds", "1 day 12 hours 59 minutes 59 seconds") + self.assertDeserializeEqual("1 day 12 hours 59 minutes 59 seconds", '"1 day 12 hours 59 minutes 59 seconds"') self.assertDeserializeNonString() class TestDeserializeRelativeTime(TestDeserialize): - """ Tests deserialize as related to Timedelta type. """ + """Tests deserialize as related to Timedelta type.""" field_type = RelativeTime @@ -586,56 +638,56 @@ def test_deserialize(self): """ # test that from_json produces no exceptions - self.assertDeserializeEqual('10:20:30', '"10:20:30"') + self.assertDeserializeEqual("10:20:30", '"10:20:30"') class TestXmlAttributes(XModuleXmlImportTest): def test_unknown_attribute(self): - assert not hasattr(CourseBlock, 'unknown_attr') - course = self.process_xml(CourseFactory.build(unknown_attr='value')) - assert not hasattr(course, 'unknown_attr') - assert course.xml_attributes['unknown_attr'] == 'value' + assert not hasattr(CourseBlock, "unknown_attr") + course = self.process_xml(CourseFactory.build(unknown_attr="value")) + assert not hasattr(course, "unknown_attr") + assert course.xml_attributes["unknown_attr"] == "value" def test_known_attribute(self): - assert hasattr(CourseBlock, 'show_calculator') - course = self.process_xml(CourseFactory.build(show_calculator='true')) + assert hasattr(CourseBlock, "show_calculator") + course = self.process_xml(CourseFactory.build(show_calculator="true")) assert course.show_calculator - assert 'show_calculator' not in course.xml_attributes + assert "show_calculator" not in course.xml_attributes def test_rerandomize_in_policy(self): # Rerandomize isn't a basic attribute of Sequence - assert not hasattr(SequenceBlock, 'rerandomize') + assert not hasattr(SequenceBlock, "rerandomize") - root = SequenceFactory.build(policy={'rerandomize': 'never'}) + root = SequenceFactory.build(policy={"rerandomize": "never"}) ProblemFactory.build(parent=root) seq = self.process_xml(root) # Rerandomize is added to the constructed sequence via the InheritanceMixin - assert seq.rerandomize == 'never' + assert seq.rerandomize == "never" # Rerandomize is a known value coming from policy, and shouldn't appear # in xml_attributes - assert 'rerandomize' not in seq.xml_attributes + assert "rerandomize" not in seq.xml_attributes def test_attempts_in_policy(self): # attempts isn't a basic attribute of Sequence - assert not hasattr(SequenceBlock, 'attempts') + assert not hasattr(SequenceBlock, "attempts") - root = SequenceFactory.build(policy={'attempts': '1'}) + root = SequenceFactory.build(policy={"attempts": "1"}) ProblemFactory.build(parent=root) seq = self.process_xml(root) # attempts isn't added to the constructed sequence, because # it's not in the InheritanceMixin - assert not hasattr(seq, 'attempts') + assert not hasattr(seq, "attempts") # attempts is an unknown attribute, so we should include it # in xml_attributes so that it gets written out (despite the misleading # name) - assert 'attempts' in seq.xml_attributes + assert "attempts" in seq.xml_attributes def check_inheritable_attribute(self, attribute, value): # `attribute` isn't a basic attribute of Sequence @@ -664,6 +716,6 @@ def 
check_inheritable_attribute(self, attribute, value): assert attribute not in seq.xml_attributes def test_inheritable_attributes(self): - self.check_inheritable_attribute('days_early_for_beta', 2) - self.check_inheritable_attribute('max_attempts', 5) - self.check_inheritable_attribute('visible_to_staff_only', True) + self.check_inheritable_attribute("days_early_for_beta", 2) + self.check_inheritable_attribute("max_attempts", 5) + self.check_inheritable_attribute("visible_to_staff_only", True) diff --git a/xmodule/util/sandboxing.py b/xmodule/util/sandboxing.py index 01e897adeca2..f8f952623ecc 100644 --- a/xmodule/util/sandboxing.py +++ b/xmodule/util/sandboxing.py @@ -5,14 +5,14 @@ from django.conf import settings from opaque_keys.edx.keys import CourseKey, LearningContextKey -DEFAULT_PYTHON_LIB_FILENAME = 'python_lib.zip' +DEFAULT_PYTHON_LIB_FILENAME = "python_lib.zip" def course_code_library_asset_name(): """ Return the asset name to use for course code libraries, defaulting to python_lib.zip. """ - return getattr(settings, 'PYTHON_LIB_FILENAME', DEFAULT_PYTHON_LIB_FILENAME) + return getattr(settings, "PYTHON_LIB_FILENAME", DEFAULT_PYTHON_LIB_FILENAME) def can_execute_unsafe_code(course_id): @@ -34,7 +34,7 @@ def can_execute_unsafe_code(course_id): # in a settings file # To others using this: the code as-is is brittle and likely to be changed in the future, # as per the TODO, so please consider carefully before adding more values to COURSES_WITH_UNSAFE_CODE - for regex in getattr(settings, 'COURSES_WITH_UNSAFE_CODE', []): + for regex in getattr(settings, "COURSES_WITH_UNSAFE_CODE", []): if re.match(regex, str(course_id)): return True return False @@ -63,6 +63,7 @@ class SandboxService: contentstore(function): function which creates an instance of xmodule.content.ContentStore course_id(string or CourseLocator): identifier for the course """ + def __init__(self, contentstore, course_id, **kwargs): super().__init__(**kwargs) self.contentstore = contentstore diff --git a/xmodule/x_module.py b/xmodule/x_module.py index 741481268f8d..4ed1a03b619d 100644 --- a/xmodule/x_module.py +++ b/xmodule/x_module.py @@ -9,7 +9,6 @@ from functools import partial import yaml - from django.conf import settings from lxml import etree from opaque_keys.edx.asides import AsideDefinitionKeyV2, AsideUsageKeyV2 @@ -18,23 +17,9 @@ from webob import Response from webob.multidict import MultiDict from xblock.core import XBlock -from xblock.fields import ( - Dict, - Float, - Integer, - List, - Scope, - String, - UserScope -) +from xblock.fields import Dict, Float, Integer, List, Scope, String, UserScope from xblock.runtime import IdGenerator, IdReader, Runtime -from xmodule import block_metadata_utils -from xmodule.fields import RelativeTime -from xmodule.modulestore.exceptions import ItemNotFoundError -from xmodule.util.builtin_assets import add_webpack_js_to_fragment -from openedx.core.djangolib.markup import HTML - from common.djangoapps.xblock_django.constants import ( ATTR_KEY_ANONYMOUS_USER_ID, ATTR_KEY_REQUEST_COUNTRY_CODE, @@ -44,12 +29,16 @@ ATTR_KEY_USER_IS_STAFF, ATTR_KEY_USER_ROLE, ) - +from openedx.core.djangolib.markup import HTML +from xmodule import block_metadata_utils +from xmodule.fields import RelativeTime +from xmodule.modulestore.exceptions import ItemNotFoundError +from xmodule.util.builtin_assets import add_webpack_js_to_fragment log = logging.getLogger(__name__) -XMODULE_METRIC_NAME = 'edxapp.xmodule' -XMODULE_DURATION_METRIC_NAME = XMODULE_METRIC_NAME + '.duration' +XMODULE_METRIC_NAME = 
"edxapp.xmodule" +XMODULE_DURATION_METRIC_NAME = XMODULE_METRIC_NAME + ".duration" XMODULE_METRIC_SAMPLE_RATE = 0.1 # xblock view names @@ -57,27 +46,27 @@ # This is the view that will be rendered to display the XBlock in the LMS. # It will also be used to render the block in "preview" mode in Studio, unless # the XBlock also implements author_view. -STUDENT_VIEW = 'student_view' +STUDENT_VIEW = "student_view" # This is the view that will be rendered to display the XBlock in the LMS for unenrolled learners. # Implementations of this view should assume that a user and user data are not available. -PUBLIC_VIEW = 'public_view' +PUBLIC_VIEW = "public_view" # An optional view of the XBlock similar to student_view, but with possible inline # editing capabilities. This view differs from studio_view in that it should be as similar to student_view # as possible. When previewing XBlocks within Studio, Studio will prefer author_view to student_view. -AUTHOR_VIEW = 'author_view' +AUTHOR_VIEW = "author_view" # The view used to render an editor in Studio. The editor rendering can be completely different # from the LMS student_view, and it is only shown when the author selects "Edit". -STUDIO_VIEW = 'studio_view' +STUDIO_VIEW = "studio_view" # Views that present a "preview" view of an xblock (as opposed to an editing view). PREVIEW_VIEWS = [STUDENT_VIEW, PUBLIC_VIEW, AUTHOR_VIEW] DEFAULT_PUBLIC_VIEW_MESSAGE = ( - 'This content is only accessible to enrolled learners. ' - 'Sign in or register, and enroll in this course to view it.' + "This content is only accessible to enrolled learners. " + "Sign in or register, and enroll in this course to view it." ) @@ -91,6 +80,7 @@ class OpaqueKeyReader(IdReader): """ IdReader for :class:`DefinitionKey` and :class:`UsageKey`s. """ + def get_definition_id(self, usage_id): """Retrieve the definition that a usage is derived from. @@ -168,6 +158,7 @@ class AsideKeyGenerator(IdGenerator): """ An :class:`.IdGenerator` that only provides facilities for constructing new XBlockAsides. """ + def create_aside(self, definition_id, usage_id, aside_type): """ Make a new aside definition and usage ids, indicating an :class:`.XBlockAside` of type `aside_type` @@ -208,23 +199,24 @@ def shim_xmodule_js(fragment, js_module_name): import webpack_loader.utils # lint-amnesty, pylint: disable=unused-import if not fragment.js_init_fn: - fragment.initialize_js('XBlockToXModuleShim') - fragment.json_init_args = {'xmodule-type': js_module_name} + fragment.initialize_js("XBlockToXModuleShim") + fragment.json_init_args = {"xmodule-type": js_module_name} - add_webpack_js_to_fragment(fragment, 'XModuleShim') + add_webpack_js_to_fragment(fragment, "XModuleShim") class XModuleFields: """ Common fields for XModules. """ + display_name = String( display_name=_("Display Name"), help=_("The display name for this component."), scope=Scope.settings, # it'd be nice to have a useful default but it screws up other things; so, # use display_name_with_default for those - default=None + default=None, ) @@ -235,6 +227,7 @@ class XModuleMixin(XModuleFields, XBlock): Adding this Mixin to an :class:`XBlock` allows it to cooperate with old-style :class:`XModules` """ + # Attributes for inspection of the block # This indicates whether the xmodule is a problem-type. 
@@ -259,7 +252,7 @@ class XModuleMixin(XModuleFields, XBlock): # This attribute can be overridden by subclasses, and # the function can also be overridden if the icon class depends on the data # in the module - icon_class = 'other' + icon_class = "other" def __init__(self, *args, **kwargs): self._asides = [] @@ -269,7 +262,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def get_cds_init_args(self): - """ Get initialization data used by SplitModuleStoreRuntime to defer FieldData initialization """ + """Get initialization data used by SplitModuleStoreRuntime to defer FieldData initialization""" if self._cds_init_args is None: raise KeyError("cds_init_args was not provided for this XBlock") if self._cds_init_args is False: @@ -295,8 +288,9 @@ def xmodule_runtime(self): Deprecated in favor of the runtime property. """ warnings.warn( - 'xmodule_runtime property is deprecated. Please use the runtime property instead.', - DeprecationWarning, stacklevel=3, + "xmodule_runtime property is deprecated. Please use the runtime property instead.", + DeprecationWarning, + stacklevel=3, ) return self.runtime @@ -308,8 +302,9 @@ def system(self): Deprecated in favor of the runtime property. """ warnings.warn( - 'system property is deprecated. Please use the runtime property instead.', - DeprecationWarning, stacklevel=3, + "system property is deprecated. Please use the runtime property instead.", + DeprecationWarning, + stacklevel=3, ) return self.runtime @@ -403,9 +398,7 @@ def get_explicitly_set_fields_by_scope(self, scope=Scope.content): result[field.name] = field.read_json(self) except TypeError as exception: exception_message = "{message}, Block-location:{location}, Field-name:{field_name}".format( - message=str(exception), - location=str(self.location), - field_name=field.name + message=str(exception), location=str(self.location), field_name=field.name ) raise TypeError(exception_message) # lint-amnesty, pylint: disable=raise-missing-from return result @@ -467,12 +460,7 @@ def get_children(self, usage_id_filter=None, usage_key_filter=None): # pylint: if usage_id_filter is None and usage_key_filter is not None: usage_id_filter = usage_key_filter - return [ - child - for child - in super().get_children(usage_id_filter) - if child is not None - ] + return [child for child in super().get_children(usage_id_filter) if child is not None] def get_child(self, usage_id): """ @@ -482,7 +470,7 @@ def get_child(self, usage_id): try: child = super().get_child(usage_id) except ItemNotFoundError: - log.warning('Unable to load item %s, skipping', usage_id) + log.warning("Unable to load item %s, skipping", usage_id) return None if child is None: @@ -544,19 +532,19 @@ def get_score(self): return None def max_score(self): - """ Maximum score. Two notes: + """Maximum score. Two notes: - * This is generic; in abstract, a problem could be 3/5 points on one - randomization, and 5/7 on another + * This is generic; in abstract, a problem could be 3/5 points on one + randomization, and 5/7 on another - * In practice, this is a Very Bad Idea, and (a) will break some code - in place (although that code should get fixed), and (b) break some - analytics we plan to put in place. + * In practice, this is a Very Bad Idea, and (a) will break some code + in place (although that code should get fixed), and (b) break some + analytics we plan to put in place. 
""" return None def get_progress(self): - """ Return a progress.Progress object that represents how far the + """Return a progress.Progress object that represents how far the student has gone in this module. Must be implemented to get correct progress tracking behavior in nesting modules like sequence and vertical. @@ -579,8 +567,8 @@ def bind_for_student(self, user_id, wrappers=None): # Skip rebinding if we're already bound a user, and it's this user. if self.scope_ids.user_id is not None and user_id == self.scope_ids.user_id: - if getattr(self.runtime, 'position', None): - self.position = self.runtime.position # update the position of the tab + if getattr(self.runtime, "position", None): + self.position = self.runtime.position # update the position of the tab return # If we are switching users mid-request, save the data from the old user. @@ -608,7 +596,7 @@ def bind_for_student(self, user_id, wrappers=None): if wrappers: # Put user-specific wrappers around the field-data service for this block. # Note that these are different from modulestore.xblock_field_data_wrappers, which are not user-specific. - wrapped_field_data = self.runtime.service(self, 'field-data-unbound') + wrapped_field_data = self.runtime.service(self, "field-data-unbound") for wrapper in wrappers: wrapped_field_data = wrapper(wrapped_field_data) self._bound_field_data = wrapped_field_data @@ -637,7 +625,7 @@ def editable_metadata_fields(self): metadata_fields = {} # Only use the fields from this class, not mixins - fields = getattr(self, 'unmixed_class', self.__class__).fields + fields = getattr(self, "unmixed_class", self.__class__).fields for field in fields.values(): if field in self.non_editable_metadata_fields: @@ -653,16 +641,17 @@ def _create_metadata_editor_info(self, field): """ Creates the information needed by the metadata editor for a specific field. """ + def jsonify_value(field, json_choice): """ Convert field value to JSON, if needed. """ if isinstance(json_choice, dict): new_json_choice = dict(json_choice) # make a copy so below doesn't change the original - if 'display_name' in json_choice: - new_json_choice['display_name'] = get_text(json_choice['display_name']) - if 'value' in json_choice: - new_json_choice['value'] = field.to_json(json_choice['value']) + if "display_name" in json_choice: + new_json_choice["display_name"] = get_text(json_choice["display_name"]) + if "value" in json_choice: + new_json_choice["value"] = field.to_json(json_choice["value"]) else: new_json_choice = field.to_json(json_choice) return new_json_choice @@ -676,10 +665,10 @@ def get_text(value): # gets the 'default_value' and 'explicitly_set' attrs metadata_field_editor_info = self.runtime.get_field_provenance(self, field) - metadata_field_editor_info['field_name'] = field.name - metadata_field_editor_info['display_name'] = get_text(field.display_name) - metadata_field_editor_info['help'] = get_text(field.help) - metadata_field_editor_info['value'] = field.read_json(self) + metadata_field_editor_info["field_name"] = field.name + metadata_field_editor_info["display_name"] = get_text(field.display_name) + metadata_field_editor_info["help"] = get_text(field.help) + metadata_field_editor_info["value"] = field.read_json(self) # We support the following editors: # 1. A select editor for fields with a list of possible values (includes Booleans). 
@@ -688,7 +677,7 @@ def get_text(value): editor_type = "Generic" values = field.values if "values_provider" in field.runtime_options: - values = field.runtime_options['values_provider'](self) + values = field.runtime_options["values_provider"](self) if isinstance(values, (tuple, list)) and len(values) > 0: editor_type = "Select" values = [jsonify_value(field, json_choice) for json_choice in values] @@ -704,8 +693,8 @@ def get_text(value): editor_type = "RelativeTime" elif isinstance(field, String) and field.name == "license": editor_type = "License" - metadata_field_editor_info['type'] = editor_type - metadata_field_editor_info['options'] = [] if values is None else values + metadata_field_editor_info["type"] = editor_type + metadata_field_editor_info["options"] = [] if values is None else values return metadata_field_editor_info @@ -721,11 +710,9 @@ def public_view(self, _context): if self.display_name: display_text = _( - '{display_name} is only accessible to enrolled learners. ' - 'Sign in or register, and enroll in this course to view it.' - ).format( - display_name=self.display_name - ) + "{display_name} is only accessible to enrolled learners. " + "Sign in or register, and enroll in this course to view it." + ).format(display_name=self.display_name) else: display_text = _(DEFAULT_PUBLIC_VIEW_MESSAGE) # lint-amnesty, pylint: disable=translation-of-non-string @@ -736,18 +723,20 @@ class XModuleToXBlockMixin: """ Common code needed by XModule and XBlocks converted from XModules. """ + @property def ajax_url(self): """ Returns the URL for the ajax handler. """ - return self.runtime.handler_url(self, 'xmodule_handler', '', '').rstrip('/?') + return self.runtime.handler_url(self, "xmodule_handler", "", "").rstrip("/?") @XBlock.handler def xmodule_handler(self, request, suffix=None): """ XBlock handler that wraps `handle_ajax` """ + class FileObjForWebobFiles: """ Turn Webob cgi.FieldStorage uploaded files into pure file objects. @@ -758,6 +747,7 @@ class FileObjForWebobFiles: name, so we carry the FieldStorage .filename attribute as the .name. """ + def __init__(self, webob_file): self.file = webob_file.file self.name = webob_file.filename @@ -773,7 +763,7 @@ def __getattr__(self, name): request_post[key] = list(map(FileObjForWebobFiles, request.POST.getall(key))) response_data = self.handle_ajax(suffix, request_post) - return Response(response_data, content_type='application/json', charset='UTF-8') + return Response(response_data, content_type="application/json", charset="UTF-8") def policy_key(location): @@ -781,7 +771,7 @@ def policy_key(location): Get the key for a location in a policy file. (Since the policy file is specific to a course, it doesn't need the full location url). """ - return f'{location.block_type}/{location.block_id}' + return f"{location.block_type}/{location.block_id}" Template = namedtuple("Template", "metadata data children") @@ -800,6 +790,7 @@ class ResourceTemplates: Note that a template must end with ".yaml" extension otherwise it will not be loaded. 
""" + template_packages = [__name__] @classmethod @@ -813,7 +804,7 @@ def _load_template(cls, template_path, template_id): with open(template_path) as file_object: template = yaml.safe_load(file_object) - template['template_id'] = template_id + template["template_id"] = template_id return template @classmethod @@ -823,7 +814,7 @@ def _load_templates_in_dir(cls, dirpath): """ templates = [] for template_file in os.listdir(dirpath): - if not template_file.endswith('.yaml'): + if not template_file.endswith(".yaml"): log.warning("Skipping unknown template file %s", template_file) continue @@ -844,21 +835,23 @@ def templates(cls): for dirpath in cls.get_template_dirpaths(): for template in cls._load_templates_in_dir(dirpath): - templates[template['template_id']] = template + templates[template["template_id"]] = template return list(templates.values()) @classmethod def get_template_dir(cls): # lint-amnesty, pylint: disable=missing-function-docstring - if getattr(cls, 'template_dir_name', None): - dirname = os.path.join('templates', cls.template_dir_name) # lint-amnesty, pylint: disable=no-member - template_path = resources.files(__name__.rsplit('.', 1)[0]) / dirname + if getattr(cls, "template_dir_name", None): + dirname = os.path.join("templates", cls.template_dir_name) # lint-amnesty, pylint: disable=no-member + template_path = resources.files(__name__.rsplit(".", 1)[0]) / dirname if not template_path.is_dir(): - log.warning("No resource directory {dir} found when loading {cls_name} templates".format( - dir=dirname, - cls_name=cls.__name__, - )) + log.warning( + "No resource directory {dir} found when loading {cls_name} templates".format( + dir=dirname, + cls_name=cls.__name__, + ) + ) return return dirname return @@ -871,7 +864,7 @@ def get_template_dirpaths(cls): template_dirpaths = [] template_dirname = cls.get_template_dir() if template_dirname: - template_path = resources.files(__name__.rsplit('.', 1)[0]) / template_dirname + template_path = resources.files(__name__.rsplit(".", 1)[0]) / template_dirname if template_path.is_dir(): with resources.as_file(template_path) as template_real_path: template_dirpaths.append(str(template_real_path)) @@ -887,7 +880,7 @@ def get_custom_template_dir(cls): If settings.CUSTOM_RESOURCE_TEMPLATES_DIRECTORY is defined, check if it has a subdirectory named as the class's template_dir_name and return the full path. """ - template_dir_name = getattr(cls, 'template_dir_name', None) + template_dir_name = getattr(cls, "template_dir_name", None) if template_dir_name is None: return @@ -919,6 +912,7 @@ class _ConfigurableFragmentWrapper: """ Runtime mixin that allows for composition of many `wrap_xblock` wrappers """ + def __init__(self, wrappers=None, wrappers_asides=None, **kwargs): """ :param wrappers: A list of wrappers, where each wrapper is: @@ -946,7 +940,7 @@ def wrap_xblock(self, block, view, frag, context): return frag - def wrap_aside(self, block, aside, view, frag, context): # pylint: disable=unused-argument + def wrap_aside(self, block, aside, view, frag, context): # pylint: disable=unused-argument """ See :func:`Runtime.wrap_child` """ @@ -962,7 +956,7 @@ def wrap_aside(self, block, aside, view, frag, context): # pylint: disable=un # Runtime.handler_url interface. 
# # The monkey-patching happens in cms/djangoapps/xblock_config/apps.py and lms/djangoapps/lms_xblock/apps.py -def block_global_handler_url(block, handler_name, suffix='', query='', thirdparty=False): +def block_global_handler_url(block, handler_name, suffix="", query="", thirdparty=False): """ See :meth:`xblock.runtime.Runtime.handler_url`. """ @@ -978,7 +972,9 @@ def block_global_local_resource_url(block, uri): """ See :meth:`xblock.runtime.Runtime.local_resource_url`. """ - raise NotImplementedError("Applications must monkey-patch this function before using local_resource_url for studio_view") # lint-amnesty, pylint: disable=line-too-long + raise NotImplementedError( + "Applications must monkey-patch this function before using local_resource_url for studio_view" + ) # lint-amnesty, pylint: disable=line-too-long class _MetricsMixin: @@ -999,10 +995,12 @@ def render(self, block, view_name, context=None): # lint-amnesty, pylint: disab duration, block.__class__.__name__, view_name, - getattr(block, 'location', ''), + getattr(block, "location", ""), ) - def handle(self, block, handler_name, request, suffix=''): # lint-amnesty, pylint: disable=missing-function-docstring + def handle( + self, block, handler_name, request, suffix="" + ): # lint-amnesty, pylint: disable=missing-function-docstring start_time = time.time() try: return super().handle(block, handler_name, request, suffix=suffix) @@ -1014,7 +1012,7 @@ def handle(self, block, handler_name, request, suffix=''): # lint-amnesty, pyli duration, block.__class__.__name__, handler_name, - getattr(block, 'location', ''), + getattr(block, "location", ""), ) @@ -1036,10 +1034,11 @@ def anonymous_student_id(self): use `ATTR_KEY_DEPRECATED_ANONYMOUS_USER_ID` from the user service. """ warnings.warn( - 'runtime.anonymous_student_id is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=3, + "runtime.anonymous_student_id is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=3, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_ANONYMOUS_USER_ID) return None @@ -1053,8 +1052,9 @@ def seed(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.seed is deprecated. Please use the user service `user_id` instead.', - DeprecationWarning, stacklevel=2, + "runtime.seed is deprecated. Please use the user service `user_id` instead.", + DeprecationWarning, + stacklevel=2, ) return self.user_id or 0 @@ -1066,10 +1066,11 @@ def user_id(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.user_id is deprecated. Use block.scope_ids.user_id or the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.user_id is deprecated. Use block.scope_ids.user_id or the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_USER_ID) return None @@ -1082,10 +1083,11 @@ def user_is_staff(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.user_is_staff is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.user_is_staff is deprecated. 
Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_STAFF) return None @@ -1098,10 +1100,11 @@ def user_location(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.user_location is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.user_location is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_REQUEST_COUNTRY_CODE) return None @@ -1118,10 +1121,11 @@ def get_real_user(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.get_real_user is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.get_real_user is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_user_by_anonymous_id return None @@ -1136,10 +1140,11 @@ def get_user_role(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.get_user_role is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.get_user_role is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return partial(user_service.get_current_user().opt_attrs.get, ATTR_KEY_USER_ROLE) @@ -1151,10 +1156,11 @@ def user_is_beta_tester(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.user_is_beta_tester is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.user_is_beta_tester is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_BETA_TESTER) @@ -1166,10 +1172,11 @@ def user_is_admin(self): Deprecated in favor of the user service. """ warnings.warn( - 'runtime.user_is_admin is deprecated. Please use the user service instead.', - DeprecationWarning, stacklevel=2, + "runtime.user_is_admin is deprecated. Please use the user service instead.", + DeprecationWarning, + stacklevel=2, ) - user_service = self._services.get('user') + user_service = self._services.get("user") if user_service: return user_service.get_current_user().opt_attrs.get(ATTR_KEY_USER_IS_GLOBAL_STAFF) @@ -1181,13 +1188,14 @@ def render_template(self): Deprecated in favor of the mako service. """ warnings.warn( - 'Use of runtime.render_template is deprecated. ' - 'Use MakoService.render_template or a JavaScript-based template instead.', - DeprecationWarning, stacklevel=2, + "Use of runtime.render_template is deprecated. 
" + "Use MakoService.render_template or a JavaScript-based template instead.", + DeprecationWarning, + stacklevel=2, ) - if hasattr(self, '_deprecated_render_template'): + if hasattr(self, "_deprecated_render_template"): return self._deprecated_render_template - render_service = self._services.get('mako') + render_service = self._services.get("mako") if render_service: return render_service.render_template return None @@ -1210,10 +1218,11 @@ def can_execute_unsafe_code(self): Deprecated in favor of the sandbox service. """ warnings.warn( - 'runtime.can_execute_unsafe_code is deprecated. Please use the sandbox service instead.', - DeprecationWarning, stacklevel=2, + "runtime.can_execute_unsafe_code is deprecated. Please use the sandbox service instead.", + DeprecationWarning, + stacklevel=2, ) - sandbox_service = self._services.get('sandbox') + sandbox_service = self._services.get("sandbox") if sandbox_service: return sandbox_service.can_execute_unsafe_code # Default to saying "no unsafe code". @@ -1230,10 +1239,11 @@ def get_python_lib_zip(self): Deprecated in favor of the sandbox service. """ warnings.warn( - 'runtime.get_python_lib_zip is deprecated. Please use the sandbox service instead.', - DeprecationWarning, stacklevel=2, + "runtime.get_python_lib_zip is deprecated. Please use the sandbox service instead.", + DeprecationWarning, + stacklevel=2, ) - sandbox_service = self._services.get('sandbox') + sandbox_service = self._services.get("sandbox") if sandbox_service: return sandbox_service.get_python_lib_zip # Default to saying "no lib data" @@ -1249,10 +1259,11 @@ def cache(self): Deprecated in favor of the cache service. """ warnings.warn( - 'runtime.cache is deprecated. Please use the cache service instead.', - DeprecationWarning, stacklevel=2, + "runtime.cache is deprecated. Please use the cache service instead.", + DeprecationWarning, + stacklevel=2, ) - return self._services.get('cache') or DoNothingCache() + return self._services.get("cache") or DoNothingCache() @property def filestore(self): @@ -1262,8 +1273,9 @@ def filestore(self): Deprecated in favor of runtime.resources_fs property. """ warnings.warn( - 'runtime.filestore is deprecated. Please use the runtime.resources_fs service instead.', - DeprecationWarning, stacklevel=2, + "runtime.filestore is deprecated. Please use the runtime.resources_fs service instead.", + DeprecationWarning, + stacklevel=2, ) return self.resources_fs @@ -1275,8 +1287,9 @@ def node_path(self): Deprecated. """ warnings.warn( - 'node_path is deprecated. Please use other methods of finding the node_modules location.', - DeprecationWarning, stacklevel=2, + "node_path is deprecated. Please use other methods of finding the node_modules location.", + DeprecationWarning, + stacklevel=2, ) @property @@ -1286,8 +1299,9 @@ def hostname(self): Deprecated in favour of direct import of `django.conf.settings` """ warnings.warn( - 'runtime.hostname is deprecated. Please use `LMS_BASE` from `django.conf.settings`.', - DeprecationWarning, stacklevel=2, + "runtime.hostname is deprecated. Please use `LMS_BASE` from `django.conf.settings`.", + DeprecationWarning, + stacklevel=2, ) return settings.LMS_BASE @@ -1301,9 +1315,10 @@ def rebind_noauth_module_to_user(self): """ warnings.warn( "rebind_noauth_module_to_user is deprecated. 
Please use the 'rebind_user' service instead.", - DeprecationWarning, stacklevel=2, + DeprecationWarning, + stacklevel=2, ) - rebind_user_service = self._services.get('rebind_user') + rebind_user_service = self._services.get("rebind_user") if rebind_user_service: return partial(rebind_user_service.rebind_noauth_module_to_user) @@ -1315,8 +1330,9 @@ def STATIC_URL(self): # pylint: disable=invalid-name Deprecated in favor of the settings.STATIC_URL configuration. """ warnings.warn( - 'runtime.STATIC_URL is deprecated. Please use settings.STATIC_URL instead.', - DeprecationWarning, stacklevel=2, + "runtime.STATIC_URL is deprecated. Please use settings.STATIC_URL instead.", + DeprecationWarning, + stacklevel=2, ) return settings.STATIC_URL @@ -1329,9 +1345,10 @@ def course_id(self): """ warnings.warn( "`runtime.course_id` is deprecated. Use `context_key` instead: `block.scope_ids.usage_id.context_key`.", - DeprecationWarning, stacklevel=2, + DeprecationWarning, + stacklevel=2, ) - if hasattr(self, '_deprecated_course_id'): + if hasattr(self, "_deprecated_course_id"): return self._deprecated_course_id.for_branch(None) @course_id.setter @@ -1346,6 +1363,7 @@ class ModuleStoreRuntime(_MetricsMixin, _ConfigurableFragmentWrapper, _ModuleSys """ Base class for :class:`Runtime`s to be used with :class:`XBlock`s loaded from ModuleStore. """ + def __init__( self, load_item, @@ -1354,7 +1372,7 @@ def __init__( get_policy=None, render_template=None, disabled_xblock_types=lambda: [], - **kwargs + **kwargs, ): """ load_item: Takes a Location and returns an XModuleDescriptor @@ -1374,8 +1392,8 @@ def __init__( local_resource_url: an implementation of :meth:`xblock.runtime.Runtime.local_resource_url` """ - kwargs.setdefault('id_reader', OpaqueKeyReader()) - kwargs.setdefault('id_generator', AsideKeyGenerator()) + kwargs.setdefault("id_reader", OpaqueKeyReader()) + kwargs.setdefault("id_generator", AsideKeyGenerator()) super().__init__(**kwargs) # This is used by XModules to write out separate files during xml export @@ -1398,14 +1416,15 @@ def __init__( # a few cases where the MakoService is not added to the XBlock's runtime. Specifically: * in the Instructor # Dashboard bulk emails tab, when rendering the HtmlBlock for its WYSIWYG editor. * during testing, when # fetching factory-created blocks. - if 'mako' not in self._services: + if "mako" not in self._services: from common.djangoapps.edxmako.services import MakoService - self._services['mako'] = MakoService() + + self._services["mako"] = MakoService() self.disabled_xblock_types = disabled_xblock_types def get(self, attr): - """ provide uniform access to attributes (like etree).""" + """provide uniform access to attributes (like etree).""" return self.__dict__.get(attr) def set(self, attr, val): @@ -1418,7 +1437,7 @@ def get_block(self, usage_id, for_parent=None): # get_block_for_descriptor property is used to bind additional data such as user data # to the XBlock and to check if the user has access to the block as may be required for # the LMS or Preview. - if getattr(self, 'get_block_for_descriptor', None): + if getattr(self, "get_block_for_descriptor", None): return self.get_block_for_descriptor(block) return block @@ -1447,18 +1466,18 @@ def get_field_provenance(self, xblock, field): # about the kvs, dbmodel, etc. 
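# A self-contained sketch of the provenance dict assembled just below, with a
# plain dict standing in for xblock._field_data (all names illustrative):
def _sketch_field_provenance(stored, field_name, field_default):
    return {
        # True only when a value was explicitly saved for this field.
        "explicitly_set": field_name in stored,
        # What the field would fall back to if the stored value were deleted.
        "default_value": field_default,
    }

assert _sketch_field_provenance({"display_name": "Quiz"}, "display_name", "Unit") == {
    "explicitly_set": True,
    "default_value": "Unit",
}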
result = {} - result['explicitly_set'] = xblock._field_data.has(xblock, field.name) + result["explicitly_set"] = xblock._field_data.has(xblock, field.name) try: - result['default_value'] = xblock._field_data.default(xblock, field.name) + result["default_value"] = xblock._field_data.default(xblock, field.name) except KeyError: - result['default_value'] = field.to_json(field.default) + result["default_value"] = field.to_json(field.default) return result - def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False): + def handler_url(self, block, handler_name, suffix="", query="", thirdparty=False): # When the Modulestore instantiates ModuleStoreRuntime, we will reference a # global function that the application can override, unless a specific function is # defined for LMS/CMS through the handler_url_override property. - if getattr(self, 'handler_url_override', None): + if getattr(self, "handler_url_override", None): return self.handler_url_override(block, handler_name, suffix, query, thirdparty) return block_global_handler_url(block, handler_name, suffix, query, thirdparty) @@ -1478,7 +1497,7 @@ def applicable_aside_types(self, block): """ # applicable_aside_types_override property can be used by LMS/CMS to define specific filters # and conditions as may be applicable. - if getattr(self, 'applicable_aside_types_override', None): + if getattr(self, "applicable_aside_types_override", None): return self.applicable_aside_types_override(block, applicable_aside_types=super().applicable_aside_types) potential_set = set(super().applicable_aside_types(block)) @@ -1492,7 +1511,7 @@ def resource_url(self, resource): def add_block_as_child_node(self, block, node): child = etree.SubElement(node, block.category) - child.set('url_name', block.url_name) + child.set("url_name", block.url_name) block.add_xml_to_node(child) def publish(self, block, event_type, event): # lint-amnesty, pylint: disable=arguments-differ @@ -1500,7 +1519,7 @@ def publish(self, block, event_type, event): # lint-amnesty, pylint: disable=ar Publish events through the `EventPublishingService`. This ensures that the correct track method is used for Instructor tasks. """ - if publish_service := self._services.get('publish'): + if publish_service := self._services.get("publish"): publish_service.publish(block, event_type, event) def service(self, block, service_name): @@ -1527,19 +1546,20 @@ def service(self, block, service_name): def wrap_aside(self, block, aside, view, frag, context): # LMS/CMS can define custom wrap aside using wrap_asides_override as required. - if getattr(self, 'wrap_asides_override', None): + if getattr(self, "wrap_asides_override", None): return self.wrap_asides_override(block, aside, view, frag, context, request_token=self.request_token) return super().wrap_aside(block, aside, view, frag, context) def layout_asides(self, block, context, frag, view_name, aside_frag_fns): # LMS/CMS can define custom layout aside using layout_asides_override as required. 
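# The getattr-based hook seen here (and in handler_url, applicable_aside_types
# and wrap_aside above) can be exercised in isolation -- a minimal sketch with
# illustrative names:
class _HookBase:
    def layout(self):
        return "base"

class _HookRuntime(_HookBase):
    def layout(self):
        # An application may attach `layout_override` to the instance; when
        # present it wins, otherwise defer to the superclass implementation.
        if getattr(self, "layout_override", None):
            return self.layout_override()
        return super().layout()

_rt = _HookRuntime()
assert _rt.layout() == "base"
_rt.layout_override = lambda: "custom"
assert _rt.layout() == "custom"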
- if getattr(self, 'layout_asides_override', None): + if getattr(self, "layout_asides_override", None): return self.layout_asides_override(block, context, frag, view_name, aside_frag_fns) return super().layout_asides(block, context, frag, view_name, aside_frag_fns) class DoNothingCache: """A duck-compatible object to use in ModuleSystemShim when there's no cache.""" + def get(self, _key): return None diff --git a/xmodule/xml_block.py b/xmodule/xml_block.py index 752fe5a8a912..a589b53d2ae3 100644 --- a/xmodule/xml_block.py +++ b/xmodule/xml_block.py @@ -10,13 +10,14 @@ from xblock.core import XML_NAMESPACES from xblock.fields import Dict, Scope, ScopeIds from xblock.runtime import KvsFieldData + from xmodule.modulestore import EdxJSONEncoder from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata log = logging.getLogger(__name__) # assume all XML files are persisted as utf-8. -EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False, remove_blank_text=True, encoding='utf-8') +EDX_XML_PARSER = XMLParser(dtd_validation=False, load_dtd=False, remove_blank_text=True, encoding="utf-8") def name_to_pathname(name): @@ -24,7 +25,7 @@ def name_to_pathname(name): Convert a location name for use in a path: replace ':' with '/'. This allows users of the xml format to organize content into directories """ - return name.replace(':', '/') + return name.replace(":", "/") def is_pointer_tag(xml_obj): @@ -40,9 +41,9 @@ def is_pointer_tag(xml_obj): Returns a bool. """ if xml_obj.tag != "course": - expected_attr = {'url_name'} + expected_attr = {"url_name"} else: - expected_attr = {'url_name', 'course', 'org'} + expected_attr = {"url_name", "course", "org"} actual_attr = set(xml_obj.attrib.keys()) @@ -62,7 +63,7 @@ def serialize_field(value): return value elif isinstance(value, datetime.datetime): if value.tzinfo is not None and value.utcoffset() is None: - return value.isoformat() + 'Z' + return value.isoformat() + "Z" return value.isoformat() return json.dumps(value, cls=EdxJSONEncoder) @@ -104,49 +105,58 @@ class XmlMixin: """ Class containing XML parsing functionality shared between XBlock and XModuleDescriptor. """ + resources_dir = None # Extension to append to filename paths - filename_extension = 'xml' - - xml_attributes = Dict(help="Map of unhandled xml attributes, used only for storage between import and export", - default={}, scope=Scope.settings) - - metadata_to_strip = ('data_dir', - 'tabs', 'grading_policy', - 'discussion_blackouts', - # VS[compat] - # These attributes should have been removed from here once all 2012-fall courses imported into - # the CMS and "inline" OLX format deprecated. But, it never got deprecated. Moreover, it's - # widely used to this date. So, we still have to strip them. Also, removing of "filename" - # changes OLX returned by `/api/olx-export/v1/xblock/{block_id}/`, which indicates that some - # places in the platform rely on it. - 'course', 'org', 'url_name', 'filename', - # Used for storing xml attributes between import and export, for roundtrips - 'xml_attributes', - # Used by _import_xml_node_to_parent in cms/djangoapps/contentstore/helpers.py to prevent - # XmlMixin from treating some XML nodes as "pointer nodes". 
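# How metadata_to_strip is consumed when attributes are read back in (see
# load_metadata further down in this file) -- a self-contained sketch with
# illustrative field names; deserialization is omitted:
def _sketch_load_metadata(attrib, known_fields, strip=("url_name", "filename")):
    metadata = {"xml_attributes": {}}
    for attr, val in attrib.items():
        if attr in strip:
            continue  # recorded elsewhere, or deliberately dropped
        if attr not in known_fields:
            metadata["xml_attributes"][attr] = val  # preserved for round-trip
        else:
            metadata[attr] = val
    return metadata

assert _sketch_load_metadata(
    {"url_name": "p1", "display_name": "Intro", "custom": "x"}, {"display_name"}
) == {"xml_attributes": {"custom": "x"}, "display_name": "Intro"}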
- "x-is-pointer-node", - ) + filename_extension = "xml" + + xml_attributes = Dict( + help="Map of unhandled xml attributes, used only for storage between import and export", + default={}, + scope=Scope.settings, + ) + + metadata_to_strip = ( + "data_dir", + "tabs", + "grading_policy", + "discussion_blackouts", + # VS[compat] + # These attributes should have been removed from here once all 2012-fall courses imported into + # the CMS and "inline" OLX format deprecated. But, it never got deprecated. Moreover, it's + # widely used to this date. So, we still have to strip them. Also, removing of "filename" + # changes OLX returned by `/api/olx-export/v1/xblock/{block_id}/`, which indicates that some + # places in the platform rely on it. + "course", + "org", + "url_name", + "filename", + # Used for storing xml attributes between import and export, for roundtrips + "xml_attributes", + # Used by _import_xml_node_to_parent in cms/djangoapps/contentstore/helpers.py to prevent + # XmlMixin from treating some XML nodes as "pointer nodes". + "x-is-pointer-node", + ) # This is a categories to fields map that contains the block category specific fields which should not be # cleaned and/or override while adding xml to node. metadata_to_not_to_clean = { # A category `video` having `sub` and `transcripts` fields # which should not be cleaned/override in an xml object. - 'video': ('sub', 'transcripts') + "video": ("sub", "transcripts") } - metadata_to_export_to_policy = ('discussion_topics',) + metadata_to_export_to_policy = ("discussion_topics",) @staticmethod def _get_metadata_from_xml(xml_object, remove=True): """ Extract the metadata from the XML. """ - meta = xml_object.find('meta') + meta = xml_object.find("meta") if meta is None: - return '' + return "" dmdata = meta.text if remove: xml_object.remove(meta) @@ -169,9 +179,11 @@ def clean_metadata_from_xml(cls, xml_object, excluded_fields=()): xml_object """ for field_name, field in cls.fields.items(): - if (field.scope == Scope.settings - and field_name not in excluded_fields - and xml_object.get(field_name) is not None): + if ( + field.scope == Scope.settings + and field_name not in excluded_fields + and xml_object.get(field_name) is not None + ): del xml_object.attrib[field_name] @classmethod @@ -197,7 +209,7 @@ def load_file(cls, filepath, fs, def_id): # pylint: disable=invalid-name return cls.file_to_xml(xml_file) except Exception as err: # lint-amnesty, pylint: disable=broad-except # Add info about where we are, but keep the traceback - raise Exception(f'Unable to load file contents at path {filepath} for item {def_id}: {err}') from err + raise Exception(f"Unable to load file contents at path {filepath} for item {def_id}: {err}") from err @classmethod def load_definition(cls, xml_object, system, def_id, id_generator): @@ -216,10 +228,10 @@ def load_definition(cls, xml_object, system, def_id, id_generator): # VS[compat] # The filename attr should have been removed once all 2012-fall courses imported into the CMS and "inline" OLX # format deprecated. This never happened, and `filename` is still used, so we have too keep both formats. 
- filename = xml_object.get('filename') + filename = xml_object.get("filename") if filename is None: definition_xml = copy.deepcopy(xml_object) - filepath = '' + filepath = "" aside_children = [] else: filepath = cls._format_filepath(xml_object.tag, filename) @@ -228,7 +240,7 @@ def load_definition(cls, xml_object, system, def_id, id_generator): # If the file doesn't exist at the right path, give the class a chance to fix it up. The file will be # written out again in the correct format. This should have gone away once the CMS became online and had # imported all 2012-fall courses from XML. - if not system.resources_fs.exists(filepath) and hasattr(cls, 'backcompat_paths'): + if not system.resources_fs.exists(filepath) and hasattr(cls, "backcompat_paths"): candidates = cls.backcompat_paths(filepath) for candidate in candidates: if system.resources_fs.exists(candidate): @@ -246,11 +258,11 @@ def load_definition(cls, xml_object, system, def_id, id_generator): cls.clean_metadata_from_xml(definition_xml) definition, children = cls.definition_from_xml(definition_xml, system) if definition_metadata: - definition['definition_metadata'] = definition_metadata - definition['filename'] = [filepath, filename] + definition["definition_metadata"] = definition_metadata + definition["filename"] = [filepath, filename] if aside_children: - definition['aside_children'] = aside_children + definition["aside_children"] = aside_children return definition, children @@ -261,7 +273,7 @@ def load_metadata(cls, xml_object): Returns a dictionary {key: value}. """ - metadata = {'xml_attributes': {}} + metadata = {"xml_attributes": {}} for attr, val in xml_object.attrib.items(): if attr in cls.metadata_to_strip: @@ -269,7 +281,7 @@ def load_metadata(cls, xml_object): continue if attr not in cls.fields: - metadata['xml_attributes'][attr] = val + metadata["xml_attributes"][attr] = val else: metadata[attr] = deserialize_field(cls.fields[attr], val) return metadata @@ -284,7 +296,7 @@ def apply_policy(cls, metadata, policy): if attr not in cls.fields: # Store unknown attributes coming from policy.json # in such a way that they will export to xml unchanged - metadata['xml_attributes'][attr] = value + metadata["xml_attributes"][attr] = value else: metadata[attr] = value @@ -308,7 +320,7 @@ def parse_xml(cls, node, runtime, keys): # pylint: disable=too-many-statements if keys is None: # Passing keys=None is against the XBlock API but some platform tests do it. - def_id = runtime.id_generator.create_definition(node.tag, node.get('url_name')) + def_id = runtime.id_generator.create_definition(node.tag, node.get("url_name")) keys = ScopeIds(None, node.tag, def_id, runtime.id_generator.create_usage(def_id)) aside_children = [] @@ -335,21 +347,21 @@ def parse_xml(cls, node, runtime, keys): # pylint: disable=too-many-statements # Make Ike's github preview links work in both old and new file layouts. 
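# is_pointer_tag (defined near the top of this file) gates the branch below.
# A self-contained check of the attribute rule it encodes; treating "no
# children, no text" as part of the rule is an assumption here:
from lxml import etree as _etree

def _sketch_is_pointer_tag(xml_obj):
    expected = {"url_name", "course", "org"} if xml_obj.tag == "course" else {"url_name"}
    return (
        set(xml_obj.attrib.keys()) == expected
        and len(xml_obj) == 0
        and not (xml_obj.text or "").strip()
    )

assert _sketch_is_pointer_tag(_etree.fromstring('<problem url_name="p1"/>'))
assert not _sketch_is_pointer_tag(_etree.fromstring('<problem url_name="p1"><p/></problem>'))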
if is_pointer_tag(node): # new style -- contents actually at filepath - definition['filename'] = [filepath, filepath] + definition["filename"] = [filepath, filepath] metadata = cls.load_metadata(definition_xml) # move definition metadata into dict - dmdata = definition.get('definition_metadata', '') + dmdata = definition.get("definition_metadata", "") if dmdata: - metadata['definition_metadata_raw'] = dmdata + metadata["definition_metadata_raw"] = dmdata try: metadata.update(json.loads(dmdata)) except Exception as err: # lint-amnesty, pylint: disable=broad-except - log.debug('Error in loading metadata %r', dmdata, exc_info=True) - metadata['definition_metadata_err'] = str(err) + log.debug("Error in loading metadata %r", dmdata, exc_info=True) + metadata["definition_metadata_err"] = str(err) - definition_aside_children = definition.pop('aside_children', None) + definition_aside_children = definition.pop("aside_children", None) if definition_aside_children: aside_children.extend(definition_aside_children) @@ -357,7 +369,7 @@ def parse_xml(cls, node, runtime, keys): # pylint: disable=too-many-statements cls.apply_policy(metadata, runtime.get_policy(keys.usage_id)) field_data = {**metadata, **definition, "children": children} - field_data['xml_attributes']['filename'] = definition.get('filename', ['', None]) # for git link + field_data["xml_attributes"]["filename"] = definition.get("filename", ["", None]) # for git link if "filename" in field_data: del field_data["filename"] # filename should only be in xml_attributes. @@ -371,14 +383,16 @@ def parse_xml(cls, node, runtime, keys): # pylint: disable=too-many-statements else: # The "normal" / new way to set field data: xblock = runtime.construct_xblock_from_class(cls, keys) - for (key, value_jsonish) in field_data.items(): + for key, value_jsonish in field_data.items(): if key in cls.fields: setattr(xblock, key, cls.fields[key].from_json(value_jsonish)) - elif key == 'children': + elif key == "children": xblock.children = value_jsonish else: log.warning( - "Imported %s XBlock does not have field %s found in XML.", xblock.scope_ids.block_type, key, + "Imported %s XBlock does not have field %s found in XML.", + xblock.scope_ids.block_type, + key, ) if aside_children: @@ -413,14 +427,14 @@ def load_definition_xml(cls, node, runtime, def_id): """ Loads definition_xml stored in a dedicated file """ - url_name = node.get('url_name') + url_name = node.get("url_name") filepath = cls._format_filepath(node.tag, name_to_pathname(url_name)) definition_xml = cls.load_file(filepath, runtime.resources_fs, def_id) return definition_xml, filepath @classmethod def _format_filepath(cls, category, name): - return f'{category}/{name}.{cls.filename_extension}' + return f"{category}/{name}.{cls.filename_extension}" def export_to_file(self): """If this returns True, write the definition of this block to a separate @@ -459,16 +473,20 @@ def add_xml_to_node(self, node): # Add the non-inherited metadata for attr in sorted(own_metadata(self)): # don't want e.g. 
data_dir - if (attr not in self.metadata_to_strip - and attr not in self.metadata_to_export_to_policy - and attr not in not_to_clean_fields): + if ( + attr not in self.metadata_to_strip + and attr not in self.metadata_to_export_to_policy + and attr not in not_to_clean_fields + ): val = serialize_field(self.fields[attr].to_json(getattr(self, attr))) try: xml_object.set(attr, val) except Exception: # lint-amnesty, pylint: disable=broad-except logging.exception( - 'Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!', # lint-amnesty, pylint: disable=line-too-long - attr, val, self.url_name + "Failed to serialize metadata attribute %s with value %s in module %s. This could mean data loss!!!", # lint-amnesty, pylint: disable=line-too-long + attr, + val, + self.url_name, ) for key, value in self.xml_attributes.items(): @@ -481,11 +499,11 @@ def add_xml_to_node(self, node): # if folder is course then create file with name {course_run}.xml filepath = self._format_filepath( self.category, - self.location.run if self.category == 'course' else url_path, + self.location.run if self.category == "course" else url_path, ) self.runtime.export_fs.makedirs(os.path.dirname(filepath), recreate=True) - with self.runtime.export_fs.open(filepath, 'wb') as fileobj: - ElementTree(xml_object).write(fileobj, pretty_print=True, encoding='utf-8') + with self.runtime.export_fs.open(filepath, "wb") as fileobj: + ElementTree(xml_object).write(fileobj, pretty_print=True, encoding="utf-8") else: # Write all attributes from xml_object onto node node.clear() @@ -496,21 +514,20 @@ def add_xml_to_node(self, node): node.extend(xml_object) # Do not override an existing value for the course. - if not node.get('url_name'): - node.set('url_name', self.url_name) + if not node.get("url_name"): + node.set("url_name", self.url_name) # Special case for course pointers: - if self.category == 'course': + if self.category == "course": # add org and course attributes on the pointer tag - node.set('org', self.location.org) - node.set('course', self.location.course) + node.set("org", self.location.org) + node.set("course", self.location.course) def definition_to_xml(self, resource_fs): """ Return a new etree Element object created from this modules definition. """ - raise NotImplementedError( - "%s does not implement definition_to_xml" % self.__class__.__name__) + raise NotImplementedError("%s does not implement definition_to_xml" % self.__class__.__name__) @property def non_editable_metadata_fields(self):
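# Tying off the export path above: what the pointer-tag branch of
# add_xml_to_node leaves on the node for a course block. A minimal sketch;
# the attribute names come from the code above, the values are illustrative:
from lxml.etree import Element as _Element, tostring as _tostring

_node = _Element("course")
_node.set("url_name", "2024_run")
_node.set("org", "DemoX")
_node.set("course", "CS101")
print(_tostring(_node))  # b'<course url_name="2024_run" org="DemoX" course="CS101"/>'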