diff --git a/course/page/code.py b/course/page/code.py index a19a4698d1c793ff65ffd8e90b6eacb7d5e9a7b2..2978954c56cbb54aecd6e7a07f9411efa10a8fb7 100644 --- a/course/page/code.py +++ b/course/page/code.py @@ -613,8 +613,7 @@ class PythonCodeQuestion(PageBaseWithTitle, PageBaseWithValue): transfer_attr("names_for_user") transfer_attr("names_from_user") - if hasattr(self.page_desc, "test_code"): - run_req["test_code"] = self.get_test_code() + run_req["test_code"] = self.get_test_code() if hasattr(self.page_desc, "data_files"): run_req["data_files"] = {} @@ -908,10 +907,11 @@ class PythonCodeQuestion(PageBaseWithTitle, PageBaseWithValue): if name in ["alt", "title"]: return True elif name == "src": - return is_allowed_data_uri([ - "image/png", - "image/jpeg", - ], value) + if is_allowed_data_uri([ + "image/png", + "image/jpeg", + ], value): + return bleach.sanitizer.VALUE_SAFE else: return False @@ -919,7 +919,8 @@ class PythonCodeQuestion(PageBaseWithTitle, PageBaseWithValue): return _("(Non-string in 'HTML' output filtered out)") return bleach.clean(s, - tags=bleach.ALLOWED_TAGS + ["audio", "video", "source"], + tags=bleach.ALLOWED_TAGS + ["audio", "video", "source", + "img"], attributes={ "audio": filter_audio_attributes, "source": filter_source_attributes, @@ -1072,10 +1073,11 @@ class PythonCodeQuestionWithHumanTextFeedback( % location) if hasattr(self.page_desc, "human_feedback_value"): - self.human_feedback_percentage = \ - self.page_desc.human_feedback_value * 100 / self.page_desc.value + self.human_feedback_percentage = ( + self.page_desc.human_feedback_value * 100 / self.page_desc.value) else: - self.human_feedback_percentage = self.page_desc.human_feedback_percentage + self.human_feedback_percentage = ( + self.page_desc.human_feedback_percentage) def required_attrs(self): return super( @@ -1136,7 +1138,8 @@ class PythonCodeQuestionWithHumanTextFeedback( human_feedback_points = None if grade_data is not None: - if grade_data["feedback_text"] is not None: + assert grade_data["feedback_text"] is not None + if grade_data["feedback_text"].strip(): human_feedback_text = markup_to_html( page_context, grade_data["feedback_text"]) @@ -1150,8 +1153,8 @@ class PythonCodeQuestionWithHumanTextFeedback( and code_feedback.correctness is not None): code_feedback_points = code_feedback.correctness*code_points - from relate.utils import render_email_template - feedback = render_email_template( + from django.template.loader import render_to_string + feedback = render_to_string( "course/feedback-code-with-human.html", { "percentage": percentage, diff --git a/tests/base_test_mixins.py b/tests/base_test_mixins.py index aafd08462355ad4b5a4e55fce710f972b9cb32fe..74e0a0afe431ba1dd5de19102314bcc3ddbbfeba 100644 --- a/tests/base_test_mixins.py +++ b/tests/base_test_mixins.py @@ -225,20 +225,27 @@ class ResponseContextMixin(object): def assertResponseContextAnswerFeedbackContainsFeedback( # noqa self, response, expected_feedback, - include_bulk_feedback=True): + include_bulk_feedback=True, html=False): answer_feedback = self.get_response_context_answer_feedback(response) feedback_str = answer_feedback.feedback if include_bulk_feedback: feedback_str += answer_feedback.bulk_feedback self.assertTrue(hasattr(answer_feedback, "feedback")) - self.assertIn(expected_feedback, feedback_str) + if not html: + self.assertIn(expected_feedback, feedback_str) + else: + self.assertInHTML(expected_feedback, feedback_str) def assertResponseContextAnswerFeedbackNotContainsFeedback( # noqa - self, response, expected_feedback): + 
self, response, expected_feedback, + html=False): answer_feedback = self.get_response_context_answer_feedback(response) self.assertTrue(hasattr(answer_feedback, "feedback")) - self.assertNotIn(expected_feedback, answer_feedback.feedback) + if not html: + self.assertNotIn(expected_feedback, answer_feedback.feedback) + else: + self.assertInHTML(expected_feedback, answer_feedback.feedback, count=0) def assertResponseContextAnswerFeedbackCorrectnessEquals( # noqa self, response, expected_correctness): @@ -488,19 +495,26 @@ class SuperuserCreateMixin(ResponseContextMixin): pretended = session.get("relate_pretend_facilities", None) self.assertIsNone(pretended) - def assertFormErrorLoose(self, response, error, form_name="form"): # noqa - """Assert that error is found in response.context['form'] errors""" + def assertFormErrorLoose(self, response, errors, form_name="form"): # noqa + """Assert that errors is found in response.context['form'] errors""" import itertools + if errors is None: + errors = [] + if not isinstance(errors, (list, tuple)): + errors = [errors] try: form_errors = list( itertools.chain(*response.context[form_name].errors.values())) except TypeError: form_errors = None - if error is not None and form_errors is None: - self.fail("%(form_name)s have no errors") - elif error is None and form_errors is None: - return - self.assertIn(str(error), form_errors) + + if form_errors is None or not form_errors: + if errors: + self.fail("%(form_name)s have no errors") + else: + return + for err in errors: + self.assertIn(err, form_errors) # {{{ defined here so that they can be used by in classmethod and instance method diff --git a/tests/test_content.py b/tests/test_content.py index 589e9f3c87f3b3b5f296704f53dbe22e62617693..dd11b4d9bb780c4aa7ec54b89741e4160b0ae664 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -412,17 +412,17 @@ class YamlJinjaExpansionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): '
<pre><code>value=${#1}</code></pre>\n' '<p>example2</p>
') resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % "{%endraw%}" resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % "{% endraw %}" resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) def test_embedded_raw_block2(self): @@ -435,12 +435,12 @@ class YamlJinjaExpansionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): '
<pre><code>value=${#1}\n' 'example2</code></pre>
') resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % "{%-endraw%}" resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) def test_embedded_raw_block3(self): @@ -451,12 +451,12 @@ class YamlJinjaExpansionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): 'example1
</code></pre>\n' '<pre><code>value=${#1}example2</code></pre>
') resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) markdown = TEST_SANDBOX_MARK_DOWN_PATTERN % "{%-endraw-%}" resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) def test_embedded_raw_block4(self): @@ -468,7 +468,7 @@ class YamlJinjaExpansionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): '
<pre><code>value=${#1}\n' 'example2</code></pre>
') resp = self.get_page_sandbox_preview_response(markdown) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "body", expected_literal) # }}} diff --git a/tests/test_pages/markdowns.py b/tests/test_pages/markdowns.py index b4d857480e6c64219dff998db5d1c82f4e5720cc..dc2cf741ad497aea809ca8ec2f26cd1f64895023 100644 --- a/tests/test_pages/markdowns.py +++ b/tests/test_pages/markdowns.py @@ -4,11 +4,50 @@ markdowns for page sandbox tests CODE_MARKDWON = """ type: PythonCodeQuestion +access_rules: + add_permissions: + - change_answer id: addition value: 1 timeout: 10 prompt: | + # Adding 1 and 2, and assign it to c + +names_from_user: [c] + +initial_code: | + c = + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = 3 + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = 2 + 1 + +correct_code_explanation: This is the [explanation](http://example.com/1). +""" + +CODE_MARKDWON_PATTERN_WITH_DATAFILES = """ +type: PythonCodeQuestion +id: addition +value: 1 +timeout: 10 +data_files: + - question-data/random-data.npy + %(extra_data_file)s +prompt: | + # Adding two numbers in Python setup_code: | @@ -38,14 +77,94 @@ correct_code: | c = a + b """ -CODE_MARKDWON_PATTERN_WITH_DATAFILES = """ +CODE_MARKDWON_WITH_DATAFILES_BAD_FORMAT = """ type: PythonCodeQuestion id: addition value: 1 timeout: 10 data_files: - question-data/random-data.npy - %(extra_data_file)s + - - foo + - bar +prompt: | + + # Adding two numbers in Python + +setup_code: | + import random + + a = random.uniform(-10, 10) + b = random.uniform(-10, 10) + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + + +CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 = """ +type: PythonCodeQuestion +access_rules: + add_permissions: + - see_answer_after_submission +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding two numbers in Python + +setup_code: | + import random + + a = random.uniform(-10, 10) + b = random.uniform(-10, 10) + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + +CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 = """ +type: PythonCodeQuestion +access_rules: + remove_permissions: + - see_answer_after_submission +id: addition +value: 1 +timeout: 10 prompt: | # Adding two numbers in Python @@ -206,3 +325,50 @@ correct_code: | c = a + b """ # noqa + +CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN = """ +type: PythonCodeQuestionWithHumanTextFeedback +id: pymult +access_rules: + add_permissions: + - change_answer +value: %(value)s +%(human_feedback)s +%(extra_attribute)s +timeout: 10 + +prompt: | + + # Adding two numbers in Python + +setup_code: | + import random + + a 
= random.uniform(-10, 10) + b = random.uniform(-10, 10) + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b + +rubric: | + + The code has to be squeaky-clean. + +""" # noqa diff --git a/tests/test_pages/test_choice.py b/tests/test_pages/test_choice.py index 8c3e10c6dede73e8e28b6de4e075f5aea1a850c5..6700de7a56ed8d01084ccb54ed2affbdc3e3eab2 100644 --- a/tests/test_pages/test_choice.py +++ b/tests/test_pages/test_choice.py @@ -231,7 +231,7 @@ class ChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): markdown = CHOICE_MARKDOWN resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertEqual(len(self.get_sandbox_page_data()), 3) page_data = self.get_sandbox_page_data()[2] @@ -253,7 +253,7 @@ class ChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): markdown = CHOICE_MARKDOWN_WITHOUT_CORRECT_ANSWER resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains( resp, PAGE_ERRORS, "one or more correct answer(s) " @@ -263,7 +263,7 @@ class ChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): markdown = CHOICE_MARKDOWN_WITH_DISREGARD resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains( resp, PAGE_ERRORS, "ChoiceQuestion does not allow any choices " @@ -273,7 +273,7 @@ class ChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): markdown = CHOICE_MARKDOWN_WITH_ALWAYS_CORRECT resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains( resp, PAGE_ERRORS, "ChoiceQuestion does not allow any choices " @@ -284,7 +284,7 @@ class ChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "correct_answer", "This is the explanation.") @@ -296,7 +296,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response( MULTIPLE_CHOICES_MARKDWON_WITH_MULTIPLE_MODE1) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) expected_page_error = ("ValidationError: sandbox, choice 1: " "more than one choice modes set: " "'~CORRECT~~CORRECT~'") @@ -306,7 +306,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response( MULTIPLE_CHOICES_MARKDWON_WITH_MULTIPLE_MODE2) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) expected_page_error = 
("ValidationError: sandbox, choice 1: " "more than one choice modes set: " "'~DISREGARD~~CORRECT~'") @@ -321,7 +321,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): "extra_attr": ""}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertEqual(len(self.get_sandbox_page_data()), 3) page_data = self.get_sandbox_page_data()[2] self.assertTrue("permutation" in page_data) @@ -345,7 +345,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): "extra_attr": ""}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertEqual(len(self.get_sandbox_page_data()), 3) # This is to make sure page_data exists and is ordered @@ -373,7 +373,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): "extra_attr": ""}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertEqual(len(self.get_sandbox_page_data()), 3) resp = self.get_page_sandbox_submit_answer_response( @@ -395,7 +395,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): % {"credit_mode": "proportional_correct"}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) resp = self.get_page_sandbox_submit_answer_response( markdown, answer_data={"choice": ['2', '5']}) @@ -419,7 +419,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): % {"credit_mode": "proportional_correct"}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) resp = self.get_page_sandbox_submit_answer_response( markdown, @@ -461,7 +461,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextContains(resp, "correct_answer", "This is the explanation.") @@ -476,7 +476,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): "extra_attr": ""}) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains(resp, PAGE_ERRORS, expected_error) def test_with_both_credit_mode_and_allow_partial_credit(self): @@ -499,12 +499,12 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response(markdown1) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains(resp, PAGE_ERRORS, expected_error) resp = self.get_page_sandbox_preview_response(markdown2) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains(resp, PAGE_ERRORS, expected_error) def 
test_without_credit_mode_but_allow_partial_credit(self): @@ -558,51 +558,51 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = self.get_page_sandbox_preview_response(markdown_exact1) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "exact", loose=True) resp = self.get_page_sandbox_preview_response(markdown_exact2) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "exact", loose=True) resp = self.get_page_sandbox_preview_response(markdown_exact3) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "exact", loose=True) resp = self.get_page_sandbox_preview_response(markdown_exact4) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "exact", loose=True) resp = self.get_page_sandbox_preview_response(markdown_proportional1) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "proportional", loose=True) resp = self.get_page_sandbox_preview_response(markdown_proportional2) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "proportional", loose=True) resp = ( self.get_page_sandbox_preview_response(markdown_proportional_correct1)) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "proportional_correct", loose=True) resp = ( self.get_page_sandbox_preview_response(markdown_proportional_correct2)) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, expected_warning_pattern % "proportional_correct", loose=True) @@ -622,7 +622,7 @@ class MultiChoicesQuestionTest(SingleCoursePageSandboxTestBaseMixin, TestCase): resp = ( self.get_page_sandbox_preview_response(markdown)) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains(resp, PAGE_ERRORS, expected_page_error) @@ -706,11 +706,11 @@ class NormalizedAnswerTest(SingleCoursePageTestMixin, TestCase): buf = BytesIO(resp.content) with zipfile.ZipFile(buf, 'r') as zf: self.assertIsNone(zf.testzip()) - self.assertEqual(len(zf.filelist), 1) + # todo: make more assertions in terms of file content + self.assertEqual( + len([f for f in zf.filelist if f.filename.endswith('.json')]), 1) for f in zf.filelist: self.assertGreater(f.file_size, 0) - # todo: make more assertions in terms of file content - self.assertIn('.json', zf.filelist[0].filename) def test_multiple_choice_page_analytics(self): # todo: make more assertions in terms of content diff --git a/tests/test_pages/test_code.py b/tests/test_pages/test_code.py index 
35894aa93ace42ae8a7e41b1d529da32f384e5e8..7fc9b02ce4590d98c5bedcfbdc6194dda4620c30 100644 --- a/tests/test_pages/test_code.py +++ b/tests/test_pages/test_code.py @@ -23,30 +23,30 @@ THE SOFTWARE. """ import six +import zipfile import unittest from unittest import skipIf -from django.test import TestCase, override_settings +from django.test import TestCase, override_settings, RequestFactory from docker.errors import APIError as DockerAPIError from socket import error as socket_error, timeout as sock_timeout import errno +from course.models import FlowSession from course.page.code import ( RUNPY_PORT, request_python_run_with_retries, InvalidPingResponse, - is_nuisance_failure) + is_nuisance_failure, PythonCodeQuestionWithHumanTextFeedback) +from course.utils import FlowPageContext, CoursePageContext + +from tests.test_pages import QUIZ_FLOW_ID +from tests.test_pages.test_generic import MESSAGE_ANSWER_SAVED_TEXT from tests.base_test_mixins import ( - SubprocessRunpyContainerMixin, - SingleCoursePageTestMixin) + SubprocessRunpyContainerMixin, SingleCoursePageTestMixin, + FallBackStorageMessageTestMixin) from tests.test_sandbox import ( SingleCoursePageSandboxTestBaseMixin, PAGE_ERRORS ) -from tests.test_pages import QUIZ_FLOW_ID -from tests.test_pages.utils import ( - skip_real_docker_test, SKIP_REAL_DOCKER_REASON, - REAL_RELATE_DOCKER_URL, REAL_RELATE_DOCKER_RUNPY_IMAGE, - REAL_RELATE_DOCKER_TLS_CONFIG -) from tests.utils import LocmemBackendTestsMixin, mock, mail from . import markdowns @@ -76,85 +76,250 @@ GRADE_CODE_FAILING_MSG = ( RUNPY_WITH_RETRIES_PATH = "course.page.code.request_python_run_with_retries" -class RealDockerTestMixin(object): - @classmethod - def setUpClass(cls): # noqa - from unittest import SkipTest - if skip_real_docker_test: - raise SkipTest(SKIP_REAL_DOCKER_REASON) - - super(RealDockerTestMixin, cls).setUpClass() - cls.override_docker_settings = override_settings( - RELATE_DOCKER_URL=REAL_RELATE_DOCKER_URL, - RELATE_DOCKER_RUNPY_IMAGE=REAL_RELATE_DOCKER_RUNPY_IMAGE, - RELATE_DOCKER_TLS_CONFIG=REAL_RELATE_DOCKER_TLS_CONFIG - ) - cls.override_docker_settings.enable() - cls.make_sure_docker_image_pulled() +def get_flow_page_desc_value_zero_human_full_percentage_side_effect( + flow_id, flow_desc, group_id, page_id): + from course.content import get_flow_page_desc + result = get_flow_page_desc(flow_id, flow_desc, group_id, page_id) + result.value = 0 + result.human_feedback_percentage = 100 + return result - @classmethod - def tearDownClass(cls): # noqa - super(RealDockerTestMixin, cls).tearDownClass() - cls.override_docker_settings.disable() - @classmethod - def make_sure_docker_image_pulled(cls): - import docker - cli = docker.Client( - base_url=REAL_RELATE_DOCKER_URL, - tls=None, - timeout=15, - version="1.19") - - if not bool(cli.images(REAL_RELATE_DOCKER_RUNPY_IMAGE)): - # This should run only once and get cached on Travis-CI - cli.pull(REAL_RELATE_DOCKER_RUNPY_IMAGE) - - -@skipIf(skip_real_docker_test, SKIP_REAL_DOCKER_REASON) -class RealDockerCodePageTest(SingleCoursePageTestMixin, - RealDockerTestMixin, TestCase): +class SingleCourseQuizPageCodeQuestionTest( + SingleCoursePageTestMixin, FallBackStorageMessageTestMixin, + SubprocessRunpyContainerMixin, TestCase): flow_id = QUIZ_FLOW_ID - page_id = "addition" + + @classmethod + def setUpTestData(cls): # noqa + super(SingleCourseQuizPageCodeQuestionTest, cls).setUpTestData() + cls.c.force_login(cls.student_participation.user) + cls.start_flow(cls.flow_id) def setUp(self): # noqa - super(RealDockerCodePageTest, 
self).setUp() + super(SingleCourseQuizPageCodeQuestionTest, self).setUp() + # This is needed to ensure student is logged in self.c.force_login(self.student_participation.user) - self.start_flow(self.flow_id) - - def test_code_page_correct_answer(self): - answer_data = {"answer": "c = a + b"} - expected_str = ( - "It looks like you submitted code that is identical to " - "the reference solution. This is not allowed.") - resp = self.post_answer_by_page_id(self.page_id, answer_data) - self.assertContains(resp, expected_str, count=1) + + def test_code_page_correct(self): + page_id = "addition" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = b + a\r']}) self.assertEqual(resp.status_code, 200) + self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) self.assertEqual(self.end_flow().status_code, 200) self.assertSessionScoreEqual(1) - def test_code_page_wrong_answer(self): - answer_data = {"answer": "c = a - b"} - resp = self.post_answer_by_page_id(self.page_id, answer_data) + def test_code_page_wrong(self): + page_id = "addition" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = a - b\r']}) self.assertEqual(resp.status_code, 200) + self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) self.assertEqual(self.end_flow().status_code, 200) self.assertSessionScoreEqual(0) - def test_code_page_user_code_exception_raise(self): - answer_data = {"answer": "c = a ^ b"} - from django.utils.html import escape - expected_error_str1 = escape( - "Your code failed with an exception. " - "A traceback is below.") - expected_error_str2 = escape( - "TypeError: unsupported operand type(s) for ^: " - "'float' and 'float'") - resp = self.post_answer_by_page_id(self.page_id, answer_data) + def test_code_page_identical_to_reference(self): + page_id = "addition" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = a + b\r']}) self.assertEqual(resp.status_code, 200) - self.assertContains(resp, expected_error_str1, count=1) - self.assertContains(resp, expected_error_str2, count=1) + self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, + ("It looks like you submitted code " + "that is identical to the reference " + "solution. 
This is not allowed.")) self.assertEqual(self.end_flow().status_code, 200) - self.assertSessionScoreEqual(0) + self.assertSessionScoreEqual(1) + + def test_download_code_submissions_no_answer(self): + group_page_id = "quiz_tail/addition" + self.end_flow() + + # no answer + with self.temporarily_switch_to_user(self.instructor_participation.user): + resp = self.post_download_all_submissions_by_group_page_id( + group_page_id=group_page_id, flow_id=self.flow_id) + self.assertEqual(resp.status_code, 200) + prefix, zip_file = resp["Content-Disposition"].split('=') + self.assertEqual(prefix, "attachment; filename") + self.assertEqual(resp.get('Content-Type'), "application/zip") + + buf = six.BytesIO(resp.content) + with zipfile.ZipFile(buf, 'r') as zf: + self.assertIsNone(zf.testzip()) + self.assertEqual( + len([f for f in zf.filelist if f.filename.endswith('.py')]), 0) + for f in zf.filelist: + self.assertGreater(f.file_size, 0) + + def test_download_code_submissions_has_answer(self): + group_page_id = "quiz_tail/addition" + + # create an answer + page_id = "addition" + self.post_answer_by_page_id( + page_id, {"answer": ['c = a - b\r']}) + self.end_flow() + with self.temporarily_switch_to_user(self.instructor_participation.user): + resp = self.post_download_all_submissions_by_group_page_id( + group_page_id=group_page_id, flow_id=self.flow_id) + self.assertEqual(resp.status_code, 200) + prefix, zip_file = resp["Content-Disposition"].split('=') + self.assertEqual(prefix, "attachment; filename") + self.assertEqual(resp.get('Content-Type'), "application/zip") + + buf = six.BytesIO(resp.content) + with zipfile.ZipFile(buf, 'r') as zf: + self.assertIsNone(zf.testzip()) + # todo: make more assertions in terms of file content + self.assertEqual( + len([f for f in zf.filelist if f.filename.endswith('.py')]), 1) + for f in zf.filelist: + self.assertGreater(f.file_size, 0) + + def test_code_page_analytics_no_answer(self): + # analytics with no answer + page_id = "addition" + self.end_flow() + with self.temporarily_switch_to_user(self.instructor_participation.user): + resp = self.get_flow_page_analytics( + flow_id=self.flow_id, group_id="quiz_tail", + page_id=page_id) + self.assertEqual(resp.status_code, 200) + + def test_code_page_analytics_has_answer(self): + # create an answer + page_id = "addition" + self.post_answer_by_page_id( + page_id, {"answer": ['c = a - b\r']}) + self.end_flow() + + # todo: make more assertions in terms of content + with self.temporarily_switch_to_user(self.instructor_participation.user): + resp = self.get_flow_page_analytics( + flow_id=self.flow_id, group_id="quiz_tail", + page_id=page_id) + self.assertEqual(resp.status_code, 200) + + def test_code_human_feedback_page_submit(self): + page_id = "pymult" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = a * b\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) + self.assertEqual(self.end_flow().status_code, 200) + self.assertSessionScoreEqual(None) + + def test_code_human_feedback_page_grade1(self): + page_id = "pymult" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = b * a\r']}) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "'c' looks good") + self.assertEqual(self.end_flow().status_code, 200) + + grade_data = { + "grade_percent": ["100"], + "released": ["on"] + } + + resp = self.post_grade_by_page_id(page_id, grade_data) + self.assertTrue(resp.status_code, 200) + 
self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The human grader assigned 2/2 points.") + + # since the test_code didn't do a feedback.set_points() after + # check_scalar() + self.assertSessionScoreEqual(None) + + def test_code_human_feedback_page_grade2(self): + page_id = "pymult" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['c = a / b\r']}) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "'c' is inaccurate") + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The autograder assigned 0/2 points.") + + self.assertEqual(self.end_flow().status_code, 200) + + feedback_text = "This is the feedback from instructor." + + grade_data = { + "grade_percent": ["100"], + "released": ["on"], + "feedback_text": feedback_text + } + resp = self.post_grade_by_page_id(page_id, grade_data) + self.assertTrue(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The human grader assigned 2/2 points.") + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, feedback_text) + self.assertSessionScoreEqual(2) + + def test_code_human_feedback_page_grade3(self): + page_id = "py_simple_list" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['b = [a + 1] * 50\r']}) + + # this is testing feedback.finish(0.3, feedback_msg) + # 2 * 0.3 = 0.6 + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The autograder assigned 0.90/3 points.") + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The elements in b have wrong values") + self.assertEqual(self.end_flow().status_code, 200) + + # The page is not graded before human grading. + self.assertSessionScoreEqual(None) + + def test_code_human_feedback_page_grade4(self): + page_id = "py_simple_list" + resp = self.post_answer_by_page_id( + page_id, {"answer": ['b = [a] * 50\r']}) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "b looks good") + self.assertEqual(self.end_flow().status_code, 200) + + grade_data = { + "grade_percent": ["100"], + "released": ["on"] + } + + resp = self.post_grade_by_page_id(page_id, grade_data) + self.assertTrue(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, "The human grader assigned 1/1 points.") + + self.assertSessionScoreEqual(4) + + grade_data = { + "released": ["on"] + } + + resp = self.post_grade_by_page_id(page_id, grade_data) + self.assertTrue(resp.status_code, 200) + self.assertFormErrorLoose(resp, None) + self.assertSessionScoreEqual(None) + + # not released + feedback_text = "This is the feedback from instructor." 
+ grade_data = { + "grade_percent": ["100"], + "feedback_text": feedback_text + } + + resp = self.post_grade_by_page_id(page_id, grade_data) + self.assertTrue(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackNotContainsFeedback( + resp, "The human grader assigned 1/1 points.") + self.assertResponseContextAnswerFeedbackNotContainsFeedback( + resp, feedback_text) + + self.assertSessionScoreEqual(None) class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, @@ -169,10 +334,18 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, ) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxNotHaveValidPage(resp) + self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains( resp, PAGE_ERRORS, "data file '%s' not found" % file_name) + def test_data_files_missing_random_question_data_file_bad_format(self): + markdown = markdowns.CODE_MARKDWON_WITH_DATAFILES_BAD_FORMAT + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "data file '%s' not found" % "['foo', 'bar']") + def test_not_multiple_submit_warning(self): markdown = ( markdowns.CODE_MARKDWON_PATTERN_WITH_DATAFILES @@ -180,12 +353,39 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, ) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING + ) + + def test_not_multiple_submit_warning2(self): + markdown = markdowns.CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain( resp, NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING ) + def test_not_multiple_submit_warning3(self): + markdown = markdowns.CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING + ) + + def test_allow_multiple_submit(self): + markdown = markdowns.CODE_MARKDWON + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + def test_explicity_not_allow_multiple_submit(self): markdown = ( markdowns.CODE_MARKDWON_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT @@ -193,14 +393,14 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, ) resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) def test_question_without_test_code(self): markdown = markdowns.CODE_MARKDWON_PATTERN_WITHOUT_TEST_CODE resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) resp = self.get_page_sandbox_submit_answer_response( @@ -215,7 +415,7 @@ class 
CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, markdown = markdowns.CODE_MARKDWON_PATTERN_WITHOUT_CORRECT_CODE resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) resp = self.get_page_sandbox_submit_answer_response( @@ -224,6 +424,148 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1) + def test_question_with_human_feedback_both_feedback_value_feedback_percentage_present(self): # noqa + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 3, + "human_feedback": "human_feedback_value: 2", + "extra_attribute": "human_feedback_percentage: 20"}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "'human_feedback_value' and " + "'human_feedback_percentage' are not " + "allowed to coexist") + + def test_question_with_human_feedback_neither_feedback_value_feedback_percentage_present(self): # noqa + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 3, + "human_feedback": "", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "expecting either 'human_feedback_value' " + "or 'human_feedback_percentage', found neither.") + + def test_question_with_human_feedback_used_feedback_value_warning(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 3, + "human_feedback": "human_feedback_value: 2", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + "Used deprecated 'human_feedback_value' attribute--" + "use 'human_feedback_percentage' instead." 
+ ) + + def test_question_with_human_feedback_used_feedback_value_bad_value(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 0, + "human_feedback": "human_feedback_value: 2", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "'human_feedback_value' attribute is not allowed " + "if value of question is 0, use " + "'human_feedback_percentage' instead") + + def test_question_with_human_feedback_used_feedback_value_invalid(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 2, + "human_feedback": "human_feedback_value: 3", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "human_feedback_value greater than overall " + "value of question") + + def test_question_with_human_feedback_feedback_percentage_invalid(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 2, + "human_feedback": "human_feedback_percentage: 120", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "the value of human_feedback_percentage " + "must be between 0 and 100") + + def test_question_with_human_feedback_value_0_feedback_full_percentage(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 0, + "human_feedback": "human_feedback_percentage: 100", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + def test_question_with_human_feedback_value_0_feedback_0_percentage(self): + markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + % {"value": 0, + "human_feedback": "human_feedback_percentage: 0", + "extra_attribute": ""}) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + def test_request_python_run_with_retries_raise_uncaught_error_in_sandbox(self): + with mock.patch( + RUNPY_WITH_RETRIES_PATH, + autospec=True + ) as mock_runpy: + expected_error_str = ("This is an error raised with " + "request_python_run_with_retries") + + # correct_code_explanation and correct_code + expected_feedback = ( + '
<p>This is the <a href="http://example.com/1">explanation</a>' '.</p>The following code is a valid answer: ' '<pre>\nc = 2 + 1\n</pre>
') + mock_runpy.side_effect = RuntimeError(expected_error_str) + + resp = self.get_page_sandbox_submit_answer_response( + markdowns.CODE_MARKDWON, + answer_data={"answer": ['c = 1 + 2\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, + None) + + self.assertResponseContextContains(resp, "correct_answer", + expected_feedback) + # No email when in sandbox + self.assertEqual(len(mail.outbox), 0) + + def test_request_python_run_with_retries_raise_uncaught_error_debugging(self): + with mock.patch( + RUNPY_WITH_RETRIES_PATH, + autospec=True + ) as mock_runpy: + expected_error_str = ("This is an error raised with " + "request_python_run_with_retries") + mock_runpy.side_effect = RuntimeError(expected_error_str) + + with override_settings(DEBUG=True): + resp = self.get_page_sandbox_submit_answer_response( + markdowns.CODE_MARKDWON, + answer_data={"answer": ['c = 1 + 2\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, + None) + # No email when debugging + self.assertEqual(len(mail.outbox), 0) + def test_request_python_run_with_retries_raise_uncaught_error(self): with mock.patch( RUNPY_WITH_RETRIES_PATH, @@ -243,7 +585,7 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, resp = self.get_page_sandbox_submit_answer_response( markdowns.CODE_MARKDWON, - answer_data={"answer": ['c = b + a\r']}) + answer_data={"answer": ['c = 1 + 2\r']}) self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, None) @@ -272,15 +614,16 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, resp = self.get_page_sandbox_submit_answer_response( markdowns.CODE_MARKDWON, - answer_data={"answer": ['c = b + a\r']}) + answer_data={"answer": ['c = 1 + 2\r']}) self.assertContains(resp, expected_error_str) self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, None) self.assertEqual(len(mail.outbox), 0) - def assert_runpy_result_and_response(self, result_type, expected_msg, - correctness=0, mail_count=0, + def assert_runpy_result_and_response(self, result_type, expected_msgs=None, + not_execpted_msgs=None, + correctness=0, mail_count=0, in_html=False, **extra_result): with mock.patch(RUNPY_WITH_RETRIES_PATH, autospec=True) as mock_runpy: result = {"result": result_type} @@ -289,9 +632,22 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, resp = self.get_page_sandbox_submit_answer_response( markdowns.CODE_MARKDWON, - answer_data={"answer": ['c = b + a\r']}) - self.assertResponseContextAnswerFeedbackContainsFeedback(resp, - expected_msg) + answer_data={"answer": ['c = 1 + 2\r']}) + + if expected_msgs is not None: + if isinstance(expected_msgs, six.text_type): + expected_msgs = [expected_msgs] + for msg in expected_msgs: + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, msg, html=in_html) + + if not_execpted_msgs is not None: + if isinstance(not_execpted_msgs, six.text_type): + not_execpted_msgs = [not_execpted_msgs] + for msg in not_execpted_msgs: + self.assertResponseContextAnswerFeedbackNotContainsFeedback( + resp, msg, html=in_html) + self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, correctness) @@ -319,20 +675,6 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, "unknown_error", None) self.assertIn("invalid runpy result: unknown_error", str(e.exception)) - def 
test_html_bleached_in_feedback(self): - self.assert_runpy_result_and_response( - "user_error", - "", - html="
<p>some html</p>
" - ) - - def test_html_non_text_bleached_in_feedback(self): - self.assert_runpy_result_and_response( - "user_error", - "(Non-string in 'HTML' output filtered out)", - html=b"not string" - ) - def test_traceback_in_feedback(self): self.assert_runpy_result_and_response( "user_error", @@ -354,6 +696,168 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, stderr="some stderr" ) + def test_exechost_local(self): + self.assert_runpy_result_and_response( + "user_error", + not_execpted_msgs="Your code ran on", + exec_host="localhost" + ) + + def test_exechost_ip(self): + with mock.patch("socket.gethostbyaddr") as mock_get_host: + ip = "192.168.1.100" + resovled = "example.com" + mock_get_host.side_effect = lambda x: (resovled, [], []) + self.assert_runpy_result_and_response( + "user_error", + execpted_msgs="Your code ran on %s" % resovled, + exec_host=ip + ) + + def test_exechost_ip_resolve_failure(self): + with mock.patch("socket.gethostbyaddr") as mock_get_host: + ip = "192.168.1.100" + mock_get_host.side_effect = socket_error + self.assert_runpy_result_and_response( + "user_error", + execpted_msgs="Your code ran on %s" % ip, + exec_host=ip + ) + + def test_figures(self): + bmp_b64 = ("data:image/bmp;base64,Qk1GAAAAAAAAAD4AAAAoAAAAAgAAAAIA" + "AAABAAEAAAAAAAgAAADEDgAAxA4AAAAAAAAAAAAAAAAAAP///wDAAAA" + "AwAAAAA==") + jpeg_b64 = ("data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4QBa" + "RXhpZgAATU0AKgAAAAgABQMBAAUAAAABAAAASgMDAAEAAAABAAAAAFE" + "QAAEAAAABAQAAAFERAAQAAAABAAAOwlESAAQAAAABAAAOwgAAAAAAAY" + "agAACxj//bAEMAAgEBAgEBAgICAgICAgIDBQMDAwMDBgQEAwUHBgcHB" + "wYHBwgJCwkICAoIBwcKDQoKCwwMDAwHCQ4PDQwOCwwMDP/bAEMBAgIC" + "AwMDBgMDBgwIBwgMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw" + "MDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAAIAAgMBIgACEQEDEQH/xA" + "AfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDA" + "gQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8" + "CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY" + "2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmq" + "srO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T" + "19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xA" + "C1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQp" + "GhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdIS" + "UpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeY" + "mZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+T" + "l5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AP38ooooA//Z") + png_b64 = ( + "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAACAQMAAAB" + "IeJ9nAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAGUExURQAAAP///" + "6XZn90AAAAJcEhZcwAADsIAAA7CARUoSoAAAAAMSURBVBjTYzjAcAAAAwQBgXn" + "6PNcAAAAASUVORK5CYII=") + + self.assert_runpy_result_and_response( + "user_error", + expected_msgs=[png_b64, jpeg_b64, "Figure1", "Figure 1", + "Figure3", "Figure 3", ], + not_execpted_msgs=[bmp_b64, "Figure2", "Figure 2"], + figures=[ + [1, "image/png", png_b64], + [2, "image/bmp", bmp_b64], + [3, "image/jpeg", jpeg_b64] + ] + ) + + def test_html_in_feedback(self): + html = "" + self.assert_runpy_result_and_response( + "user_error", + html, + html=[html] + ) + + js = "" + html_with_js = html + js + self.assert_runpy_result_and_response( + "user_error", + expected_msgs=html, + not_execpted_msgs=js, # js is sanitized + html=[html_with_js] + ) + + def test_html_audio(self): + b64_data = "T2dnUwACAAAAAAAAAAA+HAAAAAAAAGyawCEBQGZpc2h" + audio1 = ( + '' % b64_data) + audio1_1 = ( + '' % b64_data) + audio1_2 = ( + '' % b64_data) + audio2 = ( + '' % b64_data) + audio3 = ( + '' % b64_data) + audio4 = 
( + '') + + html = [audio1, audio1_1, audio1_2, audio2, audio3, audio4] + + self.assert_runpy_result_and_response( + "user_error", + expected_msgs=[audio1], + not_execpted_msgs=[audio1_1, audio1_2, audio2, audio3, audio4], + html=html, + in_html=True + ) + + def test_html_img(self): + b64_data = ("iVBORw0KGgoAAAANSUhEUgAAAAIAAAACAQMAAAB" + "IeJ9nAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAGUExURQAAAP///" + "6XZn90AAAAJcEhZcwAADsIAAA7CARUoSoAAAAAMSURBVBjTYzjAcAAAAwQBgXn" + "6PNcAAAAASUVORK5CYII=") + + img1 = ( + 'test img' % b64_data) + + img1_1 = ( + '' % b64_data) + + img1_2 = ( + '' % b64_data) + + img2 = ( + '' % b64_data) + + html = [img1, img1_1, img1_2, img2] + + self.assert_runpy_result_and_response( + "user_error", + expected_msgs=[img1], + # not_execpted_msgs=[img1_1, img1_2, img2], + html=html, + in_html=True, + ) + + def test_html_non_text_bleached_in_feedback(self): + self.assert_runpy_result_and_response( + "user_error", + "(Non-string in 'HTML' output filtered out)", + html=b"not string" + ) + class RequestPythonRunWithRetriesTest(unittest.TestCase): # Testing course.page.code.request_python_run_with_retries, @@ -503,6 +1007,15 @@ class RequestPythonRunWithRetriesTest(unittest.TestCase): case="Docker ping timeout with InvalidPingResponse and " "remove container failed with APIError"): invalid_ping_resp_msg = "my custom invalid ping response exception" + fake_host_ip = "0.0.0.0" + + mock_inpect_ctn.return_value = { + "NetworkSettings": { + "Ports": {"%d/tcp" % RUNPY_PORT: ( + {"HostIp": fake_host_ip, "HostPort": fake_host_port}, + )} + }} + mock_ctn_request.side_effect = ( InvalidPingResponse(invalid_ping_resp_msg)) mock_remove_ctn.reset_mock() @@ -519,7 +1032,7 @@ class RequestPythonRunWithRetriesTest(unittest.TestCase): self.assertEqual(res["result"], "uncaught_error") self.assertEqual(res['message'], "Timeout waiting for container.") - self.assertEqual(res["exec_host"], fake_host_ip) + self.assertEqual(res["exec_host"], "localhost") self.assertIn(InvalidPingResponse.__name__, res["traceback"]) self.assertIn(invalid_ping_resp_msg, res["traceback"]) @@ -669,4 +1182,112 @@ class IsNuisanceFailureTest(unittest.TestCase): "\nhttp.client.RemoteDisconnected: \nfoo"} self.assertTrue(is_nuisance_failure(result)) + +class CodeQuestionWithHumanTextFeedbackSpecialCase( + SingleCoursePageTestMixin, SubprocessRunpyContainerMixin, TestCase): + """ + https://github.com/inducer/relate/issues/269 + https://github.com/inducer/relate/commit/2af0ad7aa053b735620b2cf0bae0b45822bfb87f # noqa + """ + + flow_id = QUIZ_FLOW_ID + + @classmethod + def setUpTestData(cls): # noqa + super(CodeQuestionWithHumanTextFeedbackSpecialCase, cls).setUpTestData() + cls.c.force_login(cls.student_participation.user) + cls.start_flow(cls.flow_id) + + def setUp(self): # noqa + super(CodeQuestionWithHumanTextFeedbackSpecialCase, self).setUp() + self.c.force_login(self.student_participation.user) + self.rf = RequestFactory() + + def get_grade_feedback(self, answer_data, page_value, + human_feedback_percentage, grade_data): + page_id = "py_simple_list" + course_identifier = self.course.identifier + flow_session_id = self.get_default_flow_session_id(course_identifier) + flow_session = FlowSession.objects.get(id=flow_session_id) + + page_ordinal = self.get_page_ordinal_via_page_id( + page_id, course_identifier, flow_session_id) + + post_data = answer_data.copy() + post_data.update({"submit": ""}) + + request = self.rf.post( + self.get_page_url_by_ordinal( + page_ordinal, course_identifier, flow_session_id), + post_data) + 
request.user = self.student_participation.user + + pctx = CoursePageContext(request, course_identifier) + fpctx = FlowPageContext( + pctx.repo, pctx.course, self.flow_id, page_ordinal, + self.student_participation, flow_session, request) + page_desc = fpctx.page_desc + page_desc.value = page_value + page_desc.human_feedback_percentage = human_feedback_percentage + + page = PythonCodeQuestionWithHumanTextFeedback(None, None, page_desc) + + page_context = fpctx.page_context + grade_data.setdefault('grade_percent', None) + grade_data.setdefault('released', True) + grade_data.setdefault('feedback_text', "") + page_data = fpctx.page_data + feedback = page.grade( + page_context=page_context, + answer_data=answer_data, + page_data=page_data, + grade_data=grade_data) + + return feedback + + def test_code_with_human_feedback(self): + answer_data = {"answer": 'b = [a + 0] * 50'} + grade_data = {"grade_percent": 100} + page_value = 4 + human_feedback_percentage = 60 + feedback = self.get_grade_feedback( + answer_data, page_value, human_feedback_percentage, grade_data) + self.assertIn("The overall grade is 100%.", feedback.feedback) + self.assertIn( + "The autograder assigned 1.60/1.60 points.", feedback.feedback) + self.assertIn( + "The human grader assigned 2.40/2.40 points.", feedback.feedback) + + def test_code_with_human_feedback_full_percentage(self): + answer_data = {"answer": 'b = [a + 0] * 50'} + grade_data = {"grade_percent": 100} + page_value = 0 + human_feedback_percentage = 100 + from course.page.base import AnswerFeedback + with mock.patch( + "course.page.code.PythonCodeQuestion.grade") as mock_py_grade: + + # In this way, code_feedback.correctness is None + mock_py_grade.return_value = AnswerFeedback(correctness=None) + feedback = self.get_grade_feedback( + answer_data, page_value, human_feedback_percentage, grade_data) + self.assertIn("The overall grade is 100%.", feedback.feedback) + self.assertIn( + "No information on correctness of answer.", feedback.feedback) + self.assertIn( + "The human grader assigned 0/0 points.", feedback.feedback) + + def test_code_with_human_feedback_zero_percentage(self): + answer_data = {"answer": 'b = [a + 0] * 50'} + grade_data = {} + page_value = 0 + human_feedback_percentage = 0 + feedback = self.get_grade_feedback( + answer_data, page_value, human_feedback_percentage, grade_data) + self.assertIn("The overall grade is 100%.", feedback.feedback) + self.assertIn( + "Your answer is correct.", feedback.feedback) + self.assertIn( + "The autograder assigned 0/0 points.", feedback.feedback) + # vim: fdm=marker diff --git a/tests/test_pages/test_generic.py b/tests/test_pages/test_generic.py index f3dde3ba18b0595cdc55912604ac89fe4ed1bff3..33002059de970de27918623be80a765dc0d821a7 100644 --- a/tests/test_pages/test_generic.py +++ b/tests/test_pages/test_generic.py @@ -35,8 +35,7 @@ from course.constants import MAX_EXTRA_CREDIT_FACTOR from course.page.base import AnswerFeedback from tests.base_test_mixins import ( - SingleCoursePageTestMixin, FallBackStorageMessageTestMixin, - SubprocessRunpyContainerMixin) + SingleCoursePageTestMixin, FallBackStorageMessageTestMixin) from tests.utils import LocmemBackendTestsMixin QUIZ_FLOW_ID = "quiz-test" @@ -621,143 +620,6 @@ class SingleCourseQuizPageGradeInterfaceTest(LocmemBackendTestsMixin, # }}} -class SingleCourseQuizPageCodeQuestionTest( - SingleCoursePageTestMixin, FallBackStorageMessageTestMixin, - SubprocessRunpyContainerMixin, TestCase): - flow_id = QUIZ_FLOW_ID - - @classmethod - def setUpTestData(cls): # noqa - 
super(SingleCourseQuizPageCodeQuestionTest, cls).setUpTestData() - cls.c.force_login(cls.student_participation.user) - cls.start_flow(cls.flow_id) - - def setUp(self): # noqa - super(SingleCourseQuizPageCodeQuestionTest, self).setUp() - # This is needed to ensure student is logged in - self.c.force_login(self.student_participation.user) - - def test_code_page_correct(self): - page_id = "addition" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = b + a\r']}) - self.assertEqual(resp.status_code, 200) - self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) - self.assertEqual(self.end_flow().status_code, 200) - self.assertSessionScoreEqual(1) - - def test_code_page_wrong(self): - page_id = "addition" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = a - b\r']}) - self.assertEqual(resp.status_code, 200) - self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) - self.assertEqual(self.end_flow().status_code, 200) - self.assertSessionScoreEqual(0) - - def test_code_page_identical_to_reference(self): - page_id = "addition" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = a + b\r']}) - self.assertEqual(resp.status_code, 200) - self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, - ("It looks like you submitted code " - "that is identical to the reference " - "solution. This is not allowed.")) - self.assertEqual(self.end_flow().status_code, 200) - self.assertSessionScoreEqual(1) - - def test_code_human_feedback_page_submit(self): - page_id = "pymult" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = a * b\r']}) - self.assertEqual(resp.status_code, 200) - self.assertResponseMessagesContains(resp, MESSAGE_ANSWER_SAVED_TEXT) - self.assertEqual(self.end_flow().status_code, 200) - self.assertSessionScoreEqual(None) - - def test_code_human_feedback_page_grade1(self): - page_id = "pymult" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = b * a\r']}) - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, "'c' looks good") - self.assertEqual(self.end_flow().status_code, 200) - - grade_data = { - "grade_percent": ["100"], - "released": ["on"] - } - - resp = self.post_grade_by_page_id(page_id, grade_data) - self.assertTrue(resp.status_code, 200) - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, "The human grader assigned 2/2 points.") - - # since the test_code didn't do a feedback.set_points() after - # check_scalar() - self.assertSessionScoreEqual(None) - - def test_code_human_feedback_page_grade2(self): - page_id = "pymult" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['c = a / b\r']}) - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, "'c' is inaccurate") - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, "The autograder assigned 0/2 points.") - - self.assertEqual(self.end_flow().status_code, 200) - - grade_data = { - "grade_percent": ["100"], - "released": ["on"] - } - resp = self.post_grade_by_page_id(page_id, grade_data) - self.assertTrue(resp.status_code, 200) - self.assertResponseContextAnswerFeedbackContainsFeedback( - resp, "The human grader assigned 2/2 points.") - self.assertSessionScoreEqual(2) - - def test_code_human_feedback_page_grade3(self): - page_id = "py_simple_list" - resp = self.post_answer_by_page_id( - page_id, {"answer": ['b = [a + 1] * 50\r']}) - - # this is testing feedback.finish(0.3, feedback_msg) - # 2 * 0.3 = 
-        self.assertResponseContextAnswerFeedbackContainsFeedback(
-            resp, "The autograder assigned 0.90/3 points.")
-        self.assertResponseContextAnswerFeedbackContainsFeedback(
-            resp, "The elements in b have wrong values")
-        self.assertEqual(self.end_flow().status_code, 200)
-
-        # The page is not graded before human grading.
-        self.assertSessionScoreEqual(None)
-
-    def test_code_human_feedback_page_grade4(self):
-        page_id = "py_simple_list"
-        resp = self.post_answer_by_page_id(
-            page_id, {"answer": ['b = [a] * 50\r']})
-        self.assertResponseContextAnswerFeedbackContainsFeedback(
-            resp, "b looks good")
-        self.assertEqual(self.end_flow().status_code, 200)
-
-        grade_data = {
-            "grade_percent": ["100"],
-            "released": ["on"]
-        }
-
-        resp = self.post_grade_by_page_id(page_id, grade_data)
-        self.assertTrue(resp.status_code, 200)
-        self.assertResponseContextAnswerFeedbackContainsFeedback(
-            resp, "The human grader assigned 1/1 points.")
-
-        self.assertSessionScoreEqual(4)
-
-
 class AnswerFeedBackTest(unittest.TestCase):
     # TODO: more tests
     def test_correctness_negative(self):
diff --git a/tests/test_pages/utils.py b/tests/test_pages/utils.py
index 433f54625fc229e63eed5bbab7089297e326c74f..b0527a97bb6cdd32755bd39b7884e20c8f527cb5 100644
--- a/tests/test_pages/utils.py
+++ b/tests/test_pages/utils.py
@@ -29,6 +29,8 @@ try:
 except ImportError:
     from test.test_support import EnvironmentVarGuard  # noqa

+from django.test import override_settings
+
 # Switch for test locally
 Debug = False

@@ -72,3 +74,43 @@ SKIP_REAL_DOCKER_REASON = "These are tests for real docker"
 REAL_RELATE_DOCKER_URL = "unix:///var/run/docker.sock"
 REAL_RELATE_DOCKER_TLS_CONFIG = None
 REAL_RELATE_DOCKER_RUNPY_IMAGE = "inducer/relate-runpy-i386"
+
+
+class RealDockerTestMixin(object):
+    """
+    Mixin for code question tests that run against a real Docker
+    container. Note: tests using this mixin are slow.
+ """ + + @classmethod + def setUpClass(cls): # noqa + from unittest import SkipTest + if skip_real_docker_test: + raise SkipTest(SKIP_REAL_DOCKER_REASON) + + super(RealDockerTestMixin, cls).setUpClass() + cls.override_docker_settings = override_settings( + RELATE_DOCKER_URL=REAL_RELATE_DOCKER_URL, + RELATE_DOCKER_RUNPY_IMAGE=REAL_RELATE_DOCKER_RUNPY_IMAGE, + RELATE_DOCKER_TLS_CONFIG=REAL_RELATE_DOCKER_TLS_CONFIG + ) + cls.override_docker_settings.enable() + cls.make_sure_docker_image_pulled() + + @classmethod + def tearDownClass(cls): # noqa + super(RealDockerTestMixin, cls).tearDownClass() + cls.override_docker_settings.disable() + + @classmethod + def make_sure_docker_image_pulled(cls): + import docker + cli = docker.Client( + base_url=REAL_RELATE_DOCKER_URL, + tls=None, + timeout=15, + version="1.19") + + if not bool(cli.images(REAL_RELATE_DOCKER_RUNPY_IMAGE)): + # This should run only once and get cached on Travis-CI + cli.pull(REAL_RELATE_DOCKER_RUNPY_IMAGE) diff --git a/tests/test_sandbox.py b/tests/test_sandbox.py index 10e1a90b1afb12d762fedd53ed4c21efde9cdc2e..301811bdbdfef631cd55f9381df99350dcc08df5 100644 --- a/tests/test_sandbox.py +++ b/tests/test_sandbox.py @@ -105,7 +105,7 @@ class SingleCoursePageSandboxTestBaseMixin(SingleCourseTestMixin): def get_sandbox_page_session(self): return self.get_sandbox_data_by_key(PAGE_SESSION_KEY_PREFIX) - def assertSandboxHaveValidPage(self, resp): # noqa + def assertSandboxHasValidPage(self, resp): # noqa self.assertResponseContextEqual(resp, HAVE_VALID_PAGE, True) def assertSandboxWarningTextContain(self, resp, expected_text, loose=False): # noqa @@ -120,7 +120,7 @@ class SingleCoursePageSandboxTestBaseMixin(SingleCourseTestMixin): warnings_strs = "".join(warnings_strs) self.assertIn(expected_text, warnings_strs) - def assertSandboxNotHaveValidPage(self, resp): # noqa + def assertSandboxNotHasValidPage(self, resp): # noqa self.assertResponseContextEqual(resp, HAVE_VALID_PAGE, False) @@ -134,7 +134,7 @@ class SingleCoursePageSandboxTest(SingleCoursePageSandboxTestBaseMixin, TestCase # Check one of the quiz questions resp = self.get_page_sandbox_preview_response(QUESTION_MARKUP) self.assertEqual(resp.status_code, 200) - self.assertSandboxHaveValidPage(resp) + self.assertSandboxHasValidPage(resp) self.assertResponseContextIsNone(resp, "feedback") from course.page.text import CORRECT_ANSWER_PATTERN