def import_class(name):
    components = name.split(".")

    if len(components) < 2:
        # need at least one module plus class name
        raise ClassNotFoundError(name)
module_name = ".".join(components[:-1])
try:
mod = __import__(module_name)
except ImportError:
raise ClassNotFoundError(name)
    for comp in components[1:]:
        try:
            mod = getattr(mod, comp)
        except AttributeError:
            raise ClassNotFoundError(name)

    return mod
def get_flow_page_class(repo, typename, commit_sha):
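    """Resolve typename to a page class: first among the built-in page types
    in course.page, then as a globally importable dotted name, and finally
    (for names of the form "repo:module.Classname") from code/module.py in
    the course repository at commit_sha.
    """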
# look among default page types
import course.page
try:
return getattr(course.page, typename)
except AttributeError:
pass
# try a global dotted-name import
try:
return import_class(typename)
except ClassNotFoundError:
pass
    if typename.startswith("repo:"):
        stripped_typename = typename[5:]

        components = stripped_typename.split(".")
        if len(components) != 2:
            raise ClassNotFoundError(
                    _("repo page class must consist of two "
                        "dotted components (invalid: '%s')")
                    % typename)

        module, classname = components
        module_name = "code/"+module+".py"
        module_code = get_repo_blob(repo, module_name, commit_sha).data

        module_dict = {}
        exec(compile(module_code, module_name, 'exec'), module_dict)

        try:
            return module_dict[classname]
        except KeyError:
            raise ClassNotFoundError(typename)
else:
raise ClassNotFoundError(typename)
def instantiate_flow_page(location, repo, page_desc, commit_sha):
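    # 'location' is a human-readable identifier (course/flow/page) used in
    # error messages; the leading None stands in for the validation context,
    # which is not available (or needed) when instantiating pages here.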
class_ = get_flow_page_class(repo, page_desc.type, commit_sha)
return class_(None, location, page_desc)
# }}}
# {{{ page data wrangling
def _adjust_flow_session_page_data_inner(repo, flow_session,
course_identifier, flow_desc):
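    # Reconcile this session's FlowPageData rows with the page groups in
    # flow_desc: give visible pages consecutive ordinals, retire pages that
    # have dropped out of the flow, and honor each group's optional
    # "shuffle" and "max_page_count" attributes.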
commit_sha = get_course_commit_sha(
flow_session.course, flow_session.participation)
from course.models import FlowPageData
def remove_page(fpd):
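        # "Removing" a page only clears its ordinal so it no longer appears
        # in the session; the FlowPageData row itself is kept.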
if fpd.ordinal is not None:
fpd.ordinal = None
fpd.save()
desc_group_ids = []
ordinal = [0]
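    # ordinal is a one-element list so the nested add_page() helper below can
    # update the running page counter without a 'nonlocal' declaration.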
for grp in flow_desc.groups:
desc_group_ids.append(grp.id)
shuffle = getattr(grp, "shuffle", False)
max_page_count = getattr(grp, "max_page_count", None)
available_page_ids = [page_desc.id for page_desc in grp.pages]
if max_page_count is None:
max_page_count = len(available_page_ids)
group_pages = []
# {{{ helper functions
def find_page_desc(page_id):
new_page_desc = None
for page_desc in grp.pages:
if page_desc.id == page_id:
new_page_desc = page_desc
break
assert new_page_desc is not None
return new_page_desc
def create_fpd(new_page_desc):
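            # Instantiate the page so its make_page_data() result can be
            # stored on the freshly created FlowPageData row.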
page = instantiate_flow_page(
"course '%s', flow '%s', page '%s/%s'"
% (course_identifier, flow_session.flow_id,
grp.id, new_page_desc.id),
repo, new_page_desc, commit_sha)
return FlowPageData(
flow_session=flow_session,
ordinal=None,
page_type=new_page_desc.type,
                    group_id=grp.id,
                    page_id=new_page_desc.id,
data=page.make_page_data())
def add_page(fpd):
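            # Assign the next ordinal to this page (saving only if it
            # changed), mark its id as used, and record it in the current
            # group's page list.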
if fpd.ordinal != ordinal[0]:
fpd.ordinal = ordinal[0]
fpd.save()
ordinal[0] += 1
available_page_ids.remove(fpd.page_id)
group_pages.append(fpd)
# }}}
if shuffle:
# maintain order of existing pages as much as possible
for fpd in (FlowPageData.objects
.filter(
flow_session=flow_session,
group_id=grp.id,
ordinal__isnull=False)
.order_by("ordinal")):
if (fpd.page_id in available_page_ids
and len(group_pages) < max_page_count):
add_page(fpd)
else:
remove_page(fpd)
assert len(group_pages) <= max_page_count
from random import choice
# then add randomly chosen new pages
while len(group_pages) < max_page_count and available_page_ids:
new_page_id = choice(available_page_ids)
new_page_fpds = (FlowPageData.objects
.filter(
flow_session=flow_session,
group_id=grp.id,
page_id=new_page_id))
if new_page_fpds.count():
# We already have FlowPageData for this page, revive it
new_page_fpd, = new_page_fpds
                    assert new_page_fpd.page_id == new_page_id
else:
# Make a new FlowPageData instance
page_desc = find_page_desc(new_page_id)
assert page_desc.id == new_page_id
new_page_fpd = create_fpd(page_desc)
assert new_page_fpd.page_id == new_page_id
add_page(new_page_fpd)
else:
# reorder pages to order in flow
id_to_fpd = dict(
((fpd.group_id, fpd.page_id), fpd)
for fpd in FlowPageData.objects.filter(
flow_session=flow_session,
group_id=grp.id))
for page_desc in grp.pages:
key = (grp.id, page_desc.id)
if key in id_to_fpd:
fpd = id_to_fpd.pop(key)
else:
fpd = create_fpd(page_desc)
if len(group_pages) < max_page_count:
add_page(fpd)
for fpd in id_to_fpd.values():
remove_page(fpd)
# {{{ remove pages orphaned because of group renames
for fpd in (
FlowPageData.objects
.filter(
flow_session=flow_session,
ordinal__isnull=False)
.exclude(group_id__in=desc_group_ids)
):
remove_page(fpd)
# }}}
    if flow_session.page_count != ordinal[0]:
        flow_session.page_count = ordinal[0]
        flow_session.save()
def adjust_flow_session_page_data(repo, flow_session,
course_identifier, flow_desc):
# The atomicity is not done as a decorator above because we can't import
# django.db at the module level here. The relate-validate script wants to
# import this module, and it obviously has no database.
from django.db import transaction
with transaction.atomic():
return _adjust_flow_session_page_data_inner(
repo, flow_session, course_identifier, flow_desc)
def get_course_commit_sha(course, participation):
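    # If the participation has a preview commit set, serve content from that
    # revision; otherwise fall back to the course's active commit.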
sha = course.active_git_commit_sha
if participation is not None and participation.preview_git_commit_sha:
sha = participation.preview_git_commit_sha
return sha.encode()
def list_flow_ids(repo, commit_sha):
flow_ids = []
try:
flows_tree = get_repo_blob(repo, "flows", commit_sha)
except ObjectDoesNotExist:
# That's OK--no flows yet.
pass
else:
for entry in flows_tree.items():
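            # flow files live at flows/<flow_id>.yml; strip the 4-character
            # ".yml" suffix to recover the flow id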
flow_ids.append(entry.path[:-4])
return sorted(flow_ids)