# (removed stray "Newer"/"Older" pagination residue left over from a diff
# viewer; the bare names were not valid module content)
def get_flow_page_class(repo, typename, commit_sha):
    """Resolve *typename* to a flow page class.

    Resolution order:

    1. an attribute of :mod:`course.page`,
    2. a globally importable dotted class name,
    3. for ``repo:module.Class`` names, a class defined in
       ``code/module.py`` inside the course repo at *commit_sha*.

    :raises ClassNotFoundError: if no page class can be found.
    """
    # look among default page types
    import course.page
    try:
        return getattr(course.page, typename)
    except AttributeError:
        pass

    # try a global dotted-name import
    try:
        return import_class(typename)
    except ClassNotFoundError:
        pass

    if typename.startswith("repo:"):
        stripped_typename = typename[5:]

        components = stripped_typename.split(".")
        # Bug fix: this error was raised unconditionally; it must only
        # apply to names that do not have exactly two dotted components.
        if len(components) != 2:
            raise ClassNotFoundError(
                    # Bug fix: "conist" -> "consist" in the error message.
                    _("repo page class must consist of two "
                        "dotted components (invalid: '%s')")
                    % typename)

        module, classname = components
        module_name = "code/"+module+".py"
        module_code = get_repo_blob(repo, module_name, commit_sha).data

        # Bug fix: module_dict was never initialized before exec().
        module_dict = {}

        # SECURITY NOTE: this executes code straight from the course repo.
        # Only trusted course staff must be able to commit to the repo.
        exec(compile(module_code, module_name, 'exec'), module_dict)

        try:
            return module_dict[classname]
        except KeyError:
            # Bug fix: dict indexing raises KeyError, not AttributeError,
            # so the previous handler could never fire.
            raise ClassNotFoundError(typename)
    else:
        raise ClassNotFoundError(typename)
def instantiate_flow_page(location, repo, page_desc, commit_sha):
    """Construct a flow page object from *page_desc*.

    *location* is a human-readable string used in error reporting.
    """
    page_class = get_flow_page_class(repo, page_desc.type, commit_sha)
    return page_class(None, location, page_desc)
# }}}
# {{{ page data wrangling
def _adjust_flow_session_page_data_inner(repo, flow_session,
        course_identifier, flow_desc):
    """Synchronize the FlowPageData rows of *flow_session* with *flow_desc*.

    Pages named in the flow description receive consecutive ordinals;
    pages no longer in the flow get their ordinal cleared (the row is kept
    so the page can be revived if the flow is changed back).  Shuffled
    groups keep the relative order of already-visible pages and are topped
    up with randomly chosen new ones, up to the group's max_page_count.
    """
    commit_sha = get_course_commit_sha(
            flow_session.course, flow_session.participation)

    from course.models import FlowPageData

    def remove_page(fpd):
        # "Removing" a page only hides it by clearing its ordinal; the row
        # (and the page's data) is preserved for possible later revival.
        if fpd.ordinal is not None:
            fpd.ordinal = None
            fpd.save()

    desc_group_ids = []

    # One-element list so the nested add_page() can mutate the counter.
    ordinal = [0]
    for grp in flow_desc.groups:
        desc_group_ids.append(grp.id)

        shuffle = getattr(grp, "shuffle", False)
        max_page_count = getattr(grp, "max_page_count", None)

        available_page_ids = [page_desc.id for page_desc in grp.pages]

        if max_page_count is None:
            max_page_count = len(available_page_ids)

        group_pages = []

        # {{{ helper functions

        def find_page_desc(page_id):
            # Return the page description in grp with the given id.
            new_page_desc = None

            for page_desc in grp.pages:
                if page_desc.id == page_id:
                    new_page_desc = page_desc
                    break

            assert new_page_desc is not None

            return new_page_desc

        def create_fpd(new_page_desc):
            # Instantiate the page to obtain its initial data, and wrap it
            # in a new (unsaved) FlowPageData row.
            page = instantiate_flow_page(
                    "course '%s', flow '%s', page '%s/%s'"
                    % (course_identifier, flow_session.flow_id,
                        grp.id, new_page_desc.id),
                    repo, new_page_desc, commit_sha)

            return FlowPageData(
                    flow_session=flow_session,
                    ordinal=None,
                    page_type=new_page_desc.type,
                    # Bug fix: group_id was not set on newly created rows,
                    # yet every query in this function filters on
                    # group_id=grp.id and the reorder dict below is keyed
                    # on (group_id, page_id)--such rows would never be
                    # found again.
                    group_id=grp.id,
                    page_id=new_page_desc.id,
                    data=page.make_page_data())

        def add_page(fpd):
            # Assign the next ordinal (persisting only on change) and mark
            # the page id as consumed.
            if fpd.ordinal != ordinal[0]:
                fpd.ordinal = ordinal[0]
                fpd.save()

            ordinal[0] += 1
            available_page_ids.remove(fpd.page_id)

            group_pages.append(fpd)

        # }}}

        if shuffle:
            # maintain order of existing pages as much as possible
            for fpd in (FlowPageData.objects
                    .filter(
                        flow_session=flow_session,
                        group_id=grp.id,
                        ordinal__isnull=False)
                    .order_by("ordinal")):

                if (fpd.page_id in available_page_ids
                        and len(group_pages) < max_page_count):
                    add_page(fpd)
                else:
                    remove_page(fpd)

            assert len(group_pages) <= max_page_count

            from random import choice

            # then add randomly chosen new pages
            while len(group_pages) < max_page_count and available_page_ids:
                new_page_id = choice(available_page_ids)

                new_page_fpds = (FlowPageData.objects
                        .filter(
                            flow_session=flow_session,
                            group_id=grp.id,
                            page_id=new_page_id))

                if new_page_fpds.count():
                    # We already have FlowPageData for this page, revive it
                    new_page_fpd, = new_page_fpds
                    # Bug fix: this compared the database primary key (.id)
                    # against the page id string; the filter above and the
                    # parallel assert below show .page_id is intended.
                    assert new_page_fpd.page_id == new_page_id
                else:
                    # Make a new FlowPageData instance
                    page_desc = find_page_desc(new_page_id)
                    assert page_desc.id == new_page_id
                    new_page_fpd = create_fpd(page_desc)
                    assert new_page_fpd.page_id == new_page_id

                add_page(new_page_fpd)

        else:
            # reorder pages to order in flow
            id_to_fpd = dict(
                    ((fpd.group_id, fpd.page_id), fpd)
                    for fpd in FlowPageData.objects.filter(
                        flow_session=flow_session,
                        group_id=grp.id))

            for page_desc in grp.pages:
                key = (grp.id, page_desc.id)
                if key in id_to_fpd:
                    fpd = id_to_fpd.pop(key)
                else:
                    fpd = create_fpd(page_desc)

                if len(group_pages) < max_page_count:
                    add_page(fpd)

            # Hide whatever existing rows were not claimed above.
            for fpd in id_to_fpd.values():
                remove_page(fpd)

    # {{{ remove pages orphaned because of group renames

    for fpd in (
            FlowPageData.objects
            .filter(
                flow_session=flow_session,
                ordinal__isnull=False)
            .exclude(group_id__in=desc_group_ids)
            ):
        remove_page(fpd)

    # }}}

    if flow_session.page_count != ordinal[0]:
        flow_session.page_count = ordinal[0]
        # Bug fix: the updated page count was never persisted.
        flow_session.save()
def adjust_flow_session_page_data(repo, flow_session,
        course_identifier, flow_desc):
    """Transactionally sync the session's page data with *flow_desc*."""

    # The atomicity is not done as a decorator above because we can't import
    # django.db at the module level here. The relate-validate script wants to
    # import this module, and it obviously has no database.
    from django.db import transaction

    with transaction.atomic():
        return _adjust_flow_session_page_data_inner(
                repo, flow_session, course_identifier, flow_desc)
def get_course_commit_sha(course, participation):
    """Return, as bytes, the git commit SHA course content is served from.

    A participation with a (non-empty) preview SHA set overrides the
    course-wide active SHA.
    """
    if participation is not None and participation.preview_git_commit_sha:
        return participation.preview_git_commit_sha.encode()

    return course.active_git_commit_sha.encode()
def list_flow_ids(repo, commit_sha):
    """Return the sorted ids of all flows in the repo at *commit_sha*.

    Ids are derived by stripping the 4-character file extension from each
    entry under the "flows" tree.
    """
    try:
        flows_tree = get_repo_blob(repo, "flows", commit_sha)
    except ObjectDoesNotExist:
        # That's OK--no flows yet.
        return []

    return sorted(entry.path[:-4] for entry in flows_tree.items())