Newer
Older
def get_staticpage_desc(repo, course, commit_sha, filename):
    """Load *filename* from the course repo at *commit_sha* and return the
    normalized static-page description."""
    raw_desc = get_yaml_from_repo(repo, filename, commit_sha)
    return normalize_page_desc(raw_desc)
def get_course_desc(repo, course, commit_sha):
    """Return the static-page description for the course's own page,
    as named by ``course.course_file``."""
    return get_staticpage_desc(
            repo, course, commit_sha, course.course_file)
def normalize_flow_desc(flow_desc):
    """Normalize a flow description loaded from the repo to the current schema.

    Two legacy forms are upgraded:

    * A flow with a top-level ``pages`` list is rewritten into a single
      page group with id ``"main"`` containing those pages.
    * A flow whose ``rules`` lack a ``grade_identifier`` gets the first
      grade identifier (and its aggregation strategy) found among the
      grading rules hoisted up onto ``rules`` itself.

    Returns the normalized flow description.
    """
    if hasattr(flow_desc, "pages"):
        pages = flow_desc.pages
        from relate.utils import struct_to_dict, Struct
        d = struct_to_dict(flow_desc)
        del d["pages"]
        d["groups"] = [Struct({"id": "main", "pages": pages})]
        return Struct(d)

    if hasattr(flow_desc, "rules"):
        rules = flow_desc.rules

        if not hasattr(rules, "grade_identifier"):
            # Legacy content with grade_identifier in grading rule,
            # move first found grade_identifier up to rules.

            rules.grade_identifier = None
            rules.grade_aggregation_strategy = None

            for grule in rules.grading:
                if grule.grade_identifier is not None:
                    rules.grade_identifier = grule.grade_identifier
                    rules.grade_aggregation_strategy = \
                            grule.grade_aggregation_strategy
                    break

    # Bug fix: without this, the non-"pages" path returned None,
    # losing the flow description entirely.
    return flow_desc
def get_flow_desc(repo, course, flow_id, commit_sha):
    """Load flow *flow_id* from ``flows/<flow_id>.yml``, normalize it, and
    attach the rendered HTML of its (optional) description."""
    yaml_path = "flows/%s.yml" % flow_id
    desc = normalize_flow_desc(get_yaml_from_repo(repo, yaml_path, commit_sha))

    desc.description_html = markup_to_html(
            course, repo, commit_sha, getattr(desc, "description", None))
    return desc
def get_flow_page_desc(flow_id, flow_desc, group_id, page_id):
    """Return the page description for *group_id*/*page_id* in *flow_desc*.

    Raises ObjectDoesNotExist if the group/page combination is absent.
    (*flow_id* is only used for the error message.)
    """
    for group in flow_desc.groups:
        if group.id != group_id:
            continue
        for page_desc in group.pages:
            if page_desc.id == page_id:
                return page_desc

    raise ObjectDoesNotExist(
            _("page '%(group_id)s/%(page_id)s' in flow '%(flow_id)s'") % {
                'group_id': group_id,
                'page_id': page_id,
                'flow_id': flow_id
                })
# }}}
# {{{ flow page handling
class ClassNotFoundError(RuntimeError):
    """Raised when a (page) class cannot be resolved from its name."""
def import_class(name):
    """Resolve a dotted name (e.g. ``"pkg.mod.Class"``) to the named object.

    Raises ClassNotFoundError if the name has fewer than two components,
    the module cannot be imported, or an attribute along the path is
    missing.
    """
    components = name.split('.')
    if len(components) < 2:
        # need at least one module plus class name
        raise ClassNotFoundError(name)
    module_name = ".".join(components[:-1])
    try:
        mod = __import__(module_name)
    except ImportError:
        raise ClassNotFoundError(name)

    # __import__ returns the top-level package; walk down the remaining
    # components to reach the target object.
    for comp in components[1:]:
        try:
            mod = getattr(mod, comp)
        except AttributeError:
            raise ClassNotFoundError(name)

    # Bug fix: the resolved object was never returned, so every caller
    # received None.
    return mod
def get_flow_page_class(repo, typename, commit_sha):
    """Resolve *typename* to a flow-page class.

    Resolution order:

    1. an attribute of the :mod:`course.page` module (built-in page types),
    2. a global dotted-name import via :func:`import_class`,
    3. for ``"repo:module.Class"`` names, code loaded from ``code/module.py``
       in the course repo at *commit_sha*.

    Raises ClassNotFoundError if nothing matches.
    """
    # look among default page types
    import course.page
    try:
        return getattr(course.page, typename)
    except AttributeError:
        pass

    # try a global dotted-name import
    try:
        return import_class(typename)
    except ClassNotFoundError:
        pass

    if typename.startswith("repo:"):
        stripped_typename = typename[5:]

        components = stripped_typename.split(".")
        if len(components) != 2:
            raise ClassNotFoundError(
                    _("repo page class must consist of two "
                        "dotted components (invalid: '%s')")
                    % typename)

        module, classname = components
        module_name = "code/"+module+".py"
        module_code = get_repo_blob(repo, module_name, commit_sha).data

        module_dict = {}

        # NOTE(review): this executes repo-provided code; it assumes the
        # course repo's content is trusted.
        exec(compile(module_code, module_name, 'exec'), module_dict)

        try:
            return module_dict[classname]
        except KeyError:
            # Bug fix: dict subscripting raises KeyError, not
            # AttributeError--the old handler could never fire.
            raise ClassNotFoundError(typename)
    else:
        raise ClassNotFoundError(typename)
def instantiate_flow_page(location, repo, page_desc, commit_sha):
    """Construct a page instance for *page_desc*; its ``type`` attribute
    names the page class. *location* is used for error reporting."""
    page_class = get_flow_page_class(repo, page_desc.type, commit_sha)
    return page_class(None, location, page_desc)
# }}}
# {{{ page data wrangling
def _adjust_flow_session_page_data_inner(repo, flow_session,
        course_identifier, flow_desc):
    """Synchronize the session's FlowPageData rows with *flow_desc*.

    Walks every page group in *flow_desc* and makes the session's page
    data match: visible pages get consecutive ordinals, pages no longer
    in the flow (or beyond a group's ``max_page_count``) have their
    ordinal cleared, and pages missing data get new FlowPageData rows.
    Groups marked ``shuffle`` keep their existing page order and fill up
    with randomly chosen new pages; other groups follow the order given
    in the flow description.  Finally ``flow_session.page_count`` is
    brought up to date.
    """
    commit_sha = get_course_commit_sha(
            flow_session.course, flow_session.participation)

    from course.models import FlowPageData

    def remove_page(fpd):
        # "Removing" only clears the ordinal; the row (and its page data)
        # is retained so the page can be revived later.
        if fpd.ordinal is not None:
            fpd.ordinal = None
            fpd.save()

    desc_group_ids = []

    # Single-element list so the add_page closure below can mutate the
    # running ordinal counter.
    ordinal = [0]
    for grp in flow_desc.groups:
        desc_group_ids.append(grp.id)

        shuffle = getattr(grp, "shuffle", False)
        max_page_count = getattr(grp, "max_page_count", None)

        available_page_ids = [page_desc.id for page_desc in grp.pages]

        if max_page_count is None:
            max_page_count = len(available_page_ids)

        group_pages = []

        # {{{ helper functions

        def find_page_desc(page_id):
            new_page_desc = None

            for page_desc in grp.pages:
                if page_desc.id == page_id:
                    new_page_desc = page_desc
                    break

            assert new_page_desc is not None

            return new_page_desc

        def create_fpd(new_page_desc):
            page = instantiate_flow_page(
                    "course '%s', flow '%s', page '%s/%s'"
                    % (course_identifier, flow_session.flow_id,
                        grp.id, new_page_desc.id),
                    repo, new_page_desc, commit_sha)

            return FlowPageData(
                    flow_session=flow_session,
                    ordinal=None,
                    page_type=new_page_desc.type,
                    # group_id must be recorded at creation time: the
                    # queries and the reorder logic below all key on it.
                    group_id=grp.id,
                    page_id=new_page_desc.id,
                    data=page.make_page_data())

        def add_page(fpd):
            if fpd.ordinal != ordinal[0]:
                fpd.ordinal = ordinal[0]
                fpd.save()

            ordinal[0] += 1
            available_page_ids.remove(fpd.page_id)

            group_pages.append(fpd)

        # }}}

        if shuffle:
            # maintain order of existing pages as much as possible
            for fpd in (FlowPageData.objects
                    .filter(
                        flow_session=flow_session,
                        group_id=grp.id,
                        ordinal__isnull=False)
                    .order_by("ordinal")):

                if (fpd.page_id in available_page_ids
                        and len(group_pages) < max_page_count):
                    add_page(fpd)
                else:
                    remove_page(fpd)

            assert len(group_pages) <= max_page_count

            from random import choice

            # then add randomly chosen new pages
            while len(group_pages) < max_page_count and available_page_ids:
                new_page_id = choice(available_page_ids)

                new_page_fpds = (FlowPageData.objects
                        .filter(
                            flow_session=flow_session,
                            group_id=grp.id,
                            page_id=new_page_id))

                if new_page_fpds.count():
                    # We already have FlowPageData for this page, revive it
                    new_page_fpd, = new_page_fpds
                    # Bug fix: compared the database pk (.id) against a
                    # page id, which can never match--use page_id.
                    assert new_page_fpd.page_id == new_page_id
                else:
                    # Make a new FlowPageData instance
                    page_desc = find_page_desc(new_page_id)
                    assert page_desc.id == new_page_id
                    new_page_fpd = create_fpd(page_desc)
                    assert new_page_fpd.page_id == new_page_id

                add_page(new_page_fpd)

        else:
            # reorder pages to order in flow
            id_to_fpd = dict(
                    ((fpd.group_id, fpd.page_id), fpd)
                    for fpd in FlowPageData.objects.filter(
                        flow_session=flow_session,
                        group_id=grp.id))

            for page_desc in grp.pages:
                key = (grp.id, page_desc.id)

                if key in id_to_fpd:
                    fpd = id_to_fpd.pop(key)
                else:
                    fpd = create_fpd(page_desc)

                if len(group_pages) < max_page_count:
                    add_page(fpd)

            # anything left over is no longer part of the flow
            for fpd in id_to_fpd.values():
                remove_page(fpd)

    # {{{ remove pages orphaned because of group renames

    for fpd in (
            FlowPageData.objects
            .filter(
                flow_session=flow_session,
                ordinal__isnull=False)
            .exclude(group_id__in=desc_group_ids)
            ):
        remove_page(fpd)

    # }}}

    if flow_session.page_count != ordinal[0]:
        flow_session.page_count = ordinal[0]
        # Bug fix: persist the updated count (mirrors the fpd.save()
        # pattern above); without this the change was lost.
        flow_session.save()
def adjust_flow_session_page_data(repo, flow_session,
        course_identifier, flow_desc):
    """Run :func:`_adjust_flow_session_page_data_inner` inside a database
    transaction, so the page-data adjustment is applied atomically."""

    # The atomicity is not done as a decorator above because we can't import
    # django.db at the module level here. The relate-validate script wants to
    # import this module, and it obviously has no database.
    from django.db import transaction

    with transaction.atomic():
        return _adjust_flow_session_page_data_inner(
                repo, flow_session, course_identifier, flow_desc)
def get_course_commit_sha(course, participation):
    """Return the git commit sha to use for *course*, as bytes.

    A participation's non-empty preview sha (if any) takes precedence
    over the course's active sha.
    """
    if participation is not None and participation.preview_git_commit_sha:
        return participation.preview_git_commit_sha.encode()

    return course.active_git_commit_sha.encode()
def list_flow_ids(repo, commit_sha):
    """Return the sorted ids of all flows found under ``flows/`` in the
    repo at *commit_sha*; an absent ``flows`` directory yields []."""
    try:
        flows_tree = get_repo_blob(repo, "flows", commit_sha)
    except ObjectDoesNotExist:
        # That's OK--no flows yet.
        return []

    # entry paths are "<flow_id>.yml"; strip the 4-character suffix
    return sorted(entry.path[:-4] for entry in flows_tree.items())