else:
assert (
len([f for f in zf.filelist if
f.filename.endswith(dl_file_extension)])
== file_with_ext_count)
@classmethod
def submit_page_answer_by_ordinal_and_test(
cls, page_ordinal, use_correct_answer=True, answer_data=None,
skip_code_question=True,
expected_grades=None, expected_post_answer_status_code=200,
do_grading=False, do_human_grade=False, grade_data=None,
grade_data_extra_kwargs=None,
dl_file_extension=None,
ensure_grading_ui_get_before_grading=False,
ensure_grading_ui_get_after_grading=False,
ensure_analytic_page_get_before_submission=False,
ensure_analytic_page_get_after_submission=False,
ensure_analytic_page_get_before_grading=False,
ensure_analytic_page_get_after_grading=False,
ensure_download_before_submission=False,
ensure_download_after_submission=False,
ensure_download_before_grading=False,
ensure_download_after_grading=False,
dl_file_with_ext_count=None):
page_id = cls.get_page_id_via_page_oridnal(page_ordinal)
return cls.submit_page_answer_by_page_id_and_test(
page_id, use_correct_answer,
answer_data, skip_code_question, expected_grades,
expected_post_answer_status_code,
do_grading, do_human_grade,
grade_data, grade_data_extra_kwargs, dl_file_extension,
ensure_grading_ui_get_before_grading,
ensure_grading_ui_get_after_grading,
ensure_analytic_page_get_before_submission,
ensure_analytic_page_get_after_submission,
ensure_analytic_page_get_before_grading,
ensure_analytic_page_get_after_grading,
ensure_download_before_submission,
ensure_download_after_submission,
ensure_download_before_grading,
ensure_download_after_grading,
dl_file_with_ext_count)
@classmethod
def submit_page_answer_by_page_id_and_test(
cls, page_id, use_correct_answer=True, answer_data=None,
skip_code_question=True,
expected_grades=None, expected_post_answer_status_code=200,
do_grading=False, do_human_grade=False, grade_data=None,
grade_data_extra_kwargs=None,
dl_file_extension=None,
ensure_grading_ui_get_before_grading=False,
ensure_grading_ui_get_after_grading=False,
ensure_analytic_page_get_before_submission=False,
ensure_analytic_page_get_after_submission=False,
ensure_analytic_page_get_before_grading=False,
ensure_analytic_page_get_after_grading=False,
ensure_download_before_submission=False,
ensure_download_after_submission=False,
ensure_download_before_grading=False,
ensure_download_after_grading=False,
dl_file_with_ext_count=None):
if answer_data is not None:
assert isinstance(answer_data, dict)
use_correct_answer = False
submit_answer_response = None
post_grade_response = None
for page_tuple in TEST_PAGE_TUPLE:
if skip_code_question and page_tuple.need_runpy:
continue
if page_id == page_tuple.page_id:
group_id = page_tuple.group_id
if ensure_grading_ui_get_before_grading:
cls.ensure_grading_ui_get(page_id)
if ensure_analytic_page_get_before_submission:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_download_before_submission:
cls.ensure_download_submission(group_id, page_id)
if page_tuple.correct_answer is not None:
if answer_data is None:
answer_data = page_tuple.correct_answer
if page_id in ["anyup", "proof"]:
file_path = answer_data["uploaded_file"]
if not file_path:
# submitting an empty answer
submit_answer_response = (
cls.post_answer_by_page_id(page_id, answer_data))
else:
if isinstance(file_path, list):
file_path, = file_path
file_path = file_path.strip()
with open(file_path, 'rb') as fp:
answer_data = {"uploaded_file": fp}
submit_answer_response = (
cls.post_answer_by_page_id(
page_id, answer_data))
else:
submit_answer_response = (
cls.post_answer_by_page_id(page_id, answer_data))
# Fixed #514
# https://github.com/inducer/relate/issues/514
submit_answer_response.context["form"].as_p()
assert (submit_answer_response.status_code
== expected_post_answer_status_code), (
"%s != %s" % (submit_answer_response.status_code,
expected_post_answer_status_code))
if ensure_analytic_page_get_after_submission:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_download_after_submission:
cls.ensure_download_submission(group_id, page_id)
if not do_grading:
break
assert cls.end_flow().status_code == 200
if ensure_analytic_page_get_before_grading:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_download_before_grading:
cls.ensure_download_submission(group_id, page_id)
if page_tuple.correct_answer is not None:
if use_correct_answer:
expected_grades = page_tuple.full_points
if page_tuple.need_human_grade:
if not do_human_grade:
cls.assertSessionScoreEqual(None)
break
if grade_data is not None:
assert isinstance(grade_data, dict)
else:
grade_data = page_tuple.grade_data.copy()
if grade_data_extra_kwargs:
assert isinstance(grade_data_extra_kwargs, dict)
grade_data.update(grade_data_extra_kwargs)
post_grade_response = cls.post_grade_by_page_id(
page_id, grade_data)
cls.assertSessionScoreEqual(expected_grades)
if not dl_file_extension:
dl_file_extension = page_tuple.dl_file_extension
if ensure_download_after_grading:
cls.ensure_download_submission(
group_id, page_id,
file_with_ext_count=dl_file_with_ext_count)
if ensure_analytic_page_get_after_grading:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_grading_ui_get_after_grading:
cls.ensure_grading_ui_get(page_id)
return submit_answer_response, post_grade_response
def default_submit_page_answer_by_page_id_and_test(self, page_id,
answer_data=None,
expected_grade=None,
do_grading=True,
grade_data=None,
grade_data_extra_kwargs=None,
):
return self.submit_page_answer_by_page_id_and_test(
page_id, answer_data=answer_data,
skip_code_question=self.skip_code_question,
expected_grades=expected_grade, expected_post_answer_status_code=200,
do_grading=do_grading, do_human_grade=True, grade_data=grade_data,
grade_data_extra_kwargs=grade_data_extra_kwargs,
ensure_grading_ui_get_before_grading=True,
ensure_grading_ui_get_after_grading=True,
ensure_analytic_page_get_before_submission=True,
ensure_analytic_page_get_after_submission=True,
ensure_analytic_page_get_before_grading=True,
ensure_analytic_page_get_after_grading=True,
ensure_download_before_submission=True,
ensure_download_after_submission=True,
ensure_download_before_grading=True,
ensure_download_after_grading=True)
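# A minimal usage sketch (hypothetical, for illustration only): a TestCase
# that pulls in this mixin could exercise a page end to end like this. The
# base class name and the page id "half" are assumptions, not names defined
# in this module.
#
#     class ExampleQuizPageTest(SingleCoursePageTestMixin, TestCase):
#         skip_code_question = True
#
#         def test_submit_and_grade(self):
#             submit_resp, grade_resp = (
#                 self.default_submit_page_answer_by_page_id_and_test(
#                     "half", do_grading=True))
#             self.assertEqual(submit_resp.status_code, 200)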
@classmethod
def submit_page_human_grading_by_page_id_and_test(
cls, page_id,
expected_post_grading_status_code=200,
grade_data=None,
expected_grades=None,
do_session_score_equal_assersion=True,
grade_data_extra_kwargs=None,
force_login_instructor=True,
ensure_grading_ui_get_before_grading=False,
ensure_grading_ui_get_after_grading=False,
ensure_analytic_page_get_before_grading=False,
ensure_analytic_page_get_after_grading=False,
ensure_download_before_grading=False,
ensure_download_after_grading=False):
# this helper is expected to be used when the session is finished
post_grade_response = None
for page_tuple in TEST_PAGE_TUPLE:
if page_id == page_tuple.page_id:
group_id = page_tuple.group_id
if ensure_grading_ui_get_before_grading:
cls.ensure_grading_ui_get(page_id)
if ensure_analytic_page_get_before_grading:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_download_before_grading:
cls.ensure_download_submission(group_id, page_id)
if not page_tuple.need_human_grade:
break
assign_full_grades = True
if grade_data is not None:
assert isinstance(grade_data, dict)
assign_full_grades = False
else:
grade_data = page_tuple.grade_data.copy()
if assign_full_grades:
expected_grades = page_tuple.full_points
if grade_data_extra_kwargs:
assert isinstance(grade_data_extra_kwargs, dict)
grade_data.update(grade_data_extra_kwargs)
post_grade_response = cls.post_grade_by_page_id(
page_id, grade_data,
force_login_instructor=force_login_instructor)
assert (post_grade_response.status_code
== expected_post_grading_status_code)
if post_grade_response.status_code == 200:
if do_session_score_equal_assersion:
cls.assertSessionScoreEqual(expected_grades)
if ensure_download_after_grading:
cls.ensure_download_submission(group_id, page_id)
if ensure_analytic_page_get_after_grading:
cls.ensure_analytic_page_get(group_id, page_id)
if ensure_grading_ui_get_after_grading:
cls.ensure_grading_ui_get(page_id)
return post_grade_response
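# Usage sketch (hypothetical): once the flow session has been ended, a test
# can post a human grade for a page and rely on the default grade_data from
# TEST_PAGE_TUPLE. The page id "anyup" is an assumption; passing grade_data
# and expected_grades explicitly works the same way.
#
#     resp = cls.submit_page_human_grading_by_page_id_and_test("anyup")
#     assert resp.status_code == 200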
class FallBackStorageMessageTestMixin(object):
# In case other message storages are used, the following is the default
# storage used by Django and RELATE. Tests which concern messages
# should not include this mixin.
storage = 'django.contrib.messages.storage.fallback.FallbackStorage'
def setUp(self): # noqa
self.msg_settings_override = override_settings(MESSAGE_STORAGE=self.storage)
self.msg_settings_override.enable()
self.addCleanup(self.msg_settings_override.disable)
def get_listed_storage_from_response(self, response):
return list(self.get_response_context_value_by_name(response, 'messages'))
def clear_message_response_storage(self, response):
# this should only be used for debugging, because we are accessing private
# attributes which might change
try:
storage = self.get_response_context_value_by_name(response, 'messages')
except AssertionError:
# message doesn't exist in response context
return
if hasattr(storage, '_loaded_data'):
storage._loaded_data = []
elif hasattr(storage, '_loaded_messages'):
storage._loaded_messages = []
if hasattr(storage, '_queued_messages'):
storage._queued_messages = []
self.assertEqual(len(storage), 0)
def assertResponseMessagesCount(self, response, expected_count): # noqa
storage = self.get_listed_storage_from_response(response)
self.assertEqual(len(storage), expected_count)
def assertResponseMessagesEqual(self, response, expected_messages): # noqa
storage = self.get_listed_storage_from_response(response)
if not isinstance(expected_messages, list):
expected_messages = [expected_messages]
self.assertEqual(len([m for m in storage]), len(expected_messages))
self.assertEqual([m.message for m in storage], expected_messages)
def assertResponseMessagesEqualRegex(self, response, expected_message_regexs): # noqa
storage = self.get_listed_storage_from_response(response)
if not isinstance(expected_message_regexs, list):
expected_message_regexs = [expected_message_regexs]
self.assertEqual(len([m for m in storage]), len(expected_message_regexs))
messages = [m.message for m in storage]
for idx, m in enumerate(messages):
six.assertRegex(self, m, expected_message_regexs[idx])
def assertResponseMessagesContains(self, response, expected_messages, # noqa
loose=False):
storage = self.get_listed_storage_from_response(response)
if isinstance(expected_messages, str):
expected_messages = [expected_messages]
messages = [m.message for m in storage]
if loose:
from django.utils.encoding import force_text
messages = " ".join([force_text(m) for m in messages])
for em in expected_messages:
self.assertIn(em, messages)
def assertResponseMessageLevelsEqual(self, response, expected_levels): # noqa
storage = self.get_listed_storage_from_response(response)
self.assertEqual([m.level for m in storage], expected_levels)
def debug_print_response_messages(self, response):
"""
For debugging :class:`django.contrib.messages` objects in post response
:param response: response
"""
try:
storage = self.get_listed_storage_from_response(response)
print("\n-----------message start (%i total)-------------"
% len(storage))
for m in storage:
print(m.message)
print("-----------message end-------------\n")
except KeyError:
print("\n-------no message----------")
class SubprocessRunpyContainerMixin(object):
"""
This mixin is used to fake a runpy container, only needed when
the TestCase includes test(s) for code questions
"""
@classmethod
def setUpClass(cls): # noqa
if six.PY2:
from unittest import SkipTest
raise SkipTest("In-process fake container is configured for "
"PY3 only, since currently the runpy docker image "
"only provides a PY3 environment")
super(SubprocessRunpyContainerMixin, cls).setUpClass()
python_executable = os.getenv("PY_EXE")
if not python_executable:
python_executable = sys.executable
import subprocess
args = [python_executable,
os.path.abspath(
os.path.join(
os.path.dirname(__file__), os.pardir,
"docker-image-run-py", "runpy")),
]
cls.faked_container_process = subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
# because runpy prints to stderr
stderr=subprocess.DEVNULL
)
def setUp(self):
super(SubprocessRunpyContainerMixin, self).setUp()
self.faked_container_patch = mock.patch(
"course.page.code.SPAWN_CONTAINERS_FOR_RUNPY", False)
self.faked_container_patch.start()
self.addCleanup(self.faked_container_patch.stop)
@classmethod
def tearDownClass(cls):  # noqa
super(SubprocessRunpyContainerMixin, cls).tearDownClass()
from course.page.code import SPAWN_CONTAINERS_FOR_RUNPY
# Make sure SPAWN_CONTAINERS_FOR_RUNPY is reset to True
assert SPAWN_CONTAINERS_FOR_RUNPY
if sys.platform.startswith("win"):
# Without these lines, tests on Appveyor hung after all tests
# finished.
# However, on *nix platforms, killing the subprocess caused test
# failures when more than one TestCase used this mixin. So on those
# platforms we don't kill the subprocess; leaving it running has no
# bad side effects on the remaining tests.
cls.faked_container_process.kill()
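# Usage sketch (hypothetical base class, for illustration only): combining
# this mixin with a quiz test case makes code questions run against the
# locally spawned "runpy" subprocess instead of a docker container.
#
#     class CodeQuestionTest(SubprocessRunpyContainerMixin,
#                            SingleCourseQuizPageTest):
#         skip_code_question = False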
def improperly_configured_cache_patch():
# can be used as context manager or decorator
if six.PY3:
built_in_import_path = "builtins.__import__"
import builtins # noqa
else:
built_in_import_path = "__builtin__.__import__"
import __builtin__ as builtins # noqa
built_in_import = builtins.__import__
def my_disable_cache_import(name, globals=None, locals=None, fromlist=(),
level=0):
if name == "django.core.cache":
raise ImproperlyConfigured()
return built_in_import(name, globals, locals, fromlist, level)
return mock.patch(built_in_import_path, side_effect=my_disable_cache_import)
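# Usage sketch: since mock.patch returns an object usable both ways, the
# helper works as a decorator or as a context manager (the test body below
# is hypothetical).
#
#     def test_works_without_cache(self):
#         with improperly_configured_cache_patch():
#             # any "import django.core.cache" executed inside this block
#             # raises ImproperlyConfigured, exercising the no-cache path
#             resp = self.client.get("/")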
# {{{ admin
ADMIN_TWO_COURSE_SETUP_LIST = deepcopy(TWO_COURSE_SETUP_LIST)
# switch roles
ADMIN_TWO_COURSE_SETUP_LIST[1]["participations"][0]["role_identifier"] = "ta"
ADMIN_TWO_COURSE_SETUP_LIST[1]["participations"][1]["role_identifier"] = "instructor" # noqa
class AdminTestMixin(TwoCourseTestMixin):
courses_setup_list = ADMIN_TWO_COURSE_SETUP_LIST
none_participation_user_create_kwarg_list = (
NONE_PARTICIPATION_USER_CREATE_KWARG_LIST)
@classmethod
def setUpTestData(cls): # noqa
super(AdminTestMixin, cls).setUpTestData() # noqa
# create 2 participations (with new users) for course1
from tests.factories import ParticipationFactory
cls.course1_student_participation2 = (
ParticipationFactory.create(course=cls.course1))
cls.course1_student_participation3 = (
ParticipationFactory.create(course=cls.course1))
cls.instructor1 = cls.course1_instructor_participation.user
cls.instructor2 = cls.course2_instructor_participation.user
assert cls.instructor1 != cls.instructor2
# grant all admin permissions to instructors
from django.contrib.auth.models import Permission
for user in [cls.instructor1, cls.instructor2]:
user.is_staff = True
user.save()
for perm in Permission.objects.all():
user.user_permissions.add(perm)
@classmethod
def get_admin_change_list_view_url(cls, app_name, model_name):
return reverse("admin:%s_%s_changelist" % (app_name, model_name))
@classmethod
def get_admin_change_view_url(cls, app_name, model_name, args=None):
if args is None:
args = []
return reverse("admin:%s_%s_change" % (app_name, model_name), args=args)
@classmethod
def get_admin_add_view_url(cls, app_name, model_name, args=None):
if args is None:
args = []
return reverse("admin:%s_%s_add" % (app_name, model_name), args=args)
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
admin_form = response.context['adminform']
fieldsets = list(admin_form)
field_lines = []
for fieldset in fieldsets:
field_lines += list(fieldset)
fields = []
for field_line in field_lines:
fields += list(field_line)
return fields
def get_admin_form_fields_names(self, response):
return [f.field.name for f in self.get_admin_form_fields(response)]
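# Usage sketch (hypothetical app/model/field names, for illustration only):
# the fields rendered on an admin change view can be inspected like this.
#
#     resp = self.client.get(
#         self.get_admin_change_view_url("course", "course", args=[1]))
#     self.assertIn("hidden", self.get_admin_form_fields_names(resp))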
def get_changelist(self, request, model, modeladmin):
from django.contrib.admin.views.main import ChangeList
return ChangeList(
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.get_list_filter(request),
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin,
)
def get_filterspec_list(self, request, changelist=None, model=None,
modeladmin=None):
if changelist is None:
assert request and model and modeladmin
changelist = self.get_changelist(request, model, modeladmin)
filterspecs = changelist.get_filters(request)[0]
filterspec_list = []
for filterspec in filterspecs:
choices = tuple(c['display'] for c in filterspec.choices(changelist))
filterspec_list.append(choices)
return filterspec_list
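# Usage sketch (hypothetical model/admin instances, for illustration only):
# a test can build a request against the changelist URL and inspect the
# filter choices the admin would render.
#
#     request = self.rf.get(  # self.rf: an assumed RequestFactory
#         self.get_admin_change_list_view_url("course", "participation"))
#     request.user = self.instructor1
#     filterspecs = self.get_filterspec_list(
#         request, model=Participation, modeladmin=participation_admin)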
# }}}
class HackRepoMixin(object):
# This is needed for correctly getting other blobs
fallback_commit_sha = b"4124e0c23e369d6709a670398167cb9c2fe52d35"
# This needs to be configured when the module under test imports
# get_repo_blob at module level
get_repo_blob_patching_path = "course.content.get_repo_blob"
@classmethod
def setUpTestData(cls): # noqa
super(HackRepoMixin, cls).setUpTestData()
class Blob(object):
def __init__(self, yaml_file_name):
with open(os.path.join(FAKED_YAML_PATH, yaml_file_name), "rb") as f:
data = f.read()
self.data = data
def get_repo_side_effect(repo, full_name, commit_sha, allow_tree=True):
commit_sha_path_maps = COMMIT_SHA_MAP.get(full_name)
if commit_sha_path_maps:
assert isinstance(commit_sha_path_maps, list)
for cs_map in commit_sha_path_maps:
if commit_sha.decode() in cs_map:
path = cs_map[commit_sha.decode()]["path"]
return Blob(path)
return get_repo_blob(repo, full_name, cls.fallback_commit_sha,
allow_tree=allow_tree)
cls.batch_fake_get_repo_blob = mock.patch(cls.get_repo_blob_patching_path)
cls.mock_get_repo_blob = cls.batch_fake_get_repo_blob.start()
cls.mock_get_repo_blob.side_effect = get_repo_side_effect
@classmethod
def tearDownClass(cls): # noqa
# This must be done to avoid inconsistency
super(HackRepoMixin, cls).tearDownClass()
cls.batch_fake_get_repo_blob.stop()
def get_current_page_ids(self):
current_sha = self.course.active_git_commit_sha
for commit_sha_path_maps in COMMIT_SHA_MAP.values():
for cs_map in commit_sha_path_maps:
if current_sha in cs_map:
return cs_map[current_sha]["page_ids"]
raise ValueError("page_ids for the current commit_sha do not exist")
def assertGradeInfoEqual(self, resp, expected_grade_info_dict=None): # noqa
grade_info = resp.context["grade_info"]
assert isinstance(grade_info, GradeInfo)
if not expected_grade_info_dict:
import json
error_msg = ("\n%s" % json.dumps(OrderedDict(
sorted(
[(k, v) for (k, v) in six.iteritems(grade_info.__dict__)])),
indent=4))
error_msg = error_msg.replace("null", "None")
self.fail(error_msg)
assert isinstance(expected_grade_info_dict, dict)
grade_info_dict = grade_info.__dict__
not_match_infos = []
for k in grade_info_dict.keys():
if grade_info_dict[k] != expected_grade_info_dict[k]:
not_match_infos.append(
"'%s' is expected to be %s, while got %s"
% (k, str(expected_grade_info_dict[k]),
str(grade_info_dict[k])))
if not_match_infos:
self.fail("\n".join(not_match_infos))
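# Usage sketch (self.c, flow_completion_url and the page-id helper below are
# assumed to come from the surrounding test base, not from this module):
# with the repo patched, a test can look up the page ids belonging to the
# active commit and check a grade-info response. Passing None as
# expected_grade_info_dict makes assertGradeInfoEqual fail and print the
# actual grade_info as JSON, which is handy for building the expected dict.
#
#     page_ids = self.get_current_page_ids()  # page ids of the active commit
#     resp = self.c.get(flow_completion_url)  # assumed URL of the finished flow
#     self.assertGradeInfoEqual(resp, expected_grade_info_dict=None)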
# vim: fdm=marker