shown = True
if hasattr(rule, "shown"):
shown = rule.shown
return rule.weight, shown
def get_processed_page_chunks(course, repo, commit_sha,
page_desc, role, now_datetime, facilities):
for chunk in page_desc.chunks:
chunk.weight, chunk.shown = \
compute_chunk_weight_and_shown(
course, chunk, role, now_datetime,
facilities)
chunk.html_content = markup_to_html(course, repo, commit_sha, chunk.content)
if not hasattr(chunk, "title"):
from course.content import extract_title_from_markup
chunk.title = extract_title_from_markup(chunk.content)
page_desc.chunks.sort(key=lambda chunk: chunk.weight, reverse=True)
    return [chunk for chunk in page_desc.chunks
            if chunk.shown]
# }}}
# {{{ repo desc getting
def normalize_page_desc(page_desc):
if hasattr(page_desc, "content"):
content = page_desc.content
from relate.utils import struct_to_dict, Struct
d = struct_to_dict(page_desc)
del d["content"]
d["chunks"] = [Struct({"id": "main", "content": content})]
return Struct(d)
return page_desc
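
# A sketch of the assumed shapes: a page description that uses a bare
# "content" attribute, e.g.
#
#     content: |
#         # Welcome
#
# is rewritten by normalize_page_desc into the equivalent chunked form
#
#     chunks:
#     -   id: main
#         content: |
#             # Welcome
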
def get_staticpage_desc(repo, course, commit_sha, filename):
page_desc = get_yaml_from_repo(repo, filename, commit_sha)
page_desc = normalize_page_desc(page_desc)
return page_desc
def get_course_desc(repo, course, commit_sha):
return get_staticpage_desc(repo, course, commit_sha, course.course_file)
def normalize_flow_desc(flow_desc):
if hasattr(flow_desc, "pages"):
pages = flow_desc.pages
from relate.utils import struct_to_dict, Struct
d = struct_to_dict(flow_desc)
del d["pages"]
d["groups"] = [Struct({"id": "main", "pages": pages})]
return Struct(d)
if hasattr(flow_desc, "rules"):
rules = flow_desc.rules
if not hasattr(rules, "grade_identifier"):
            # Legacy content kept grade_identifier in each grading rule;
            # move the first grade_identifier found up to the rules level.
rules.grade_identifier = None
rules.grade_aggregation_strategy = None
for grule in rules.grading:
if grule.grade_identifier is not None:
rules.grade_identifier = grule.grade_identifier
rules.grade_aggregation_strategy = \
grule.grade_aggregation_strategy
                    break

    return flow_desc
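
# A sketch of the corresponding flow normalization: a flow description with a
# top-level "pages" list, e.g.
#
#     pages:
#     -   type: Page
#         id: intro
#
# becomes the equivalent grouped form
#
#     groups:
#     -   id: main
#         pages:
#         -   type: Page
#             id: intro
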
def get_flow_desc(repo, course, flow_id, commit_sha):
flow_desc = get_yaml_from_repo(repo, "flows/%s.yml" % flow_id, commit_sha)
flow_desc = normalize_flow_desc(flow_desc)
flow_desc.description_html = markup_to_html(
course, repo, commit_sha, getattr(flow_desc, "description", None))
return flow_desc
def get_flow_page_desc(flow_id, flow_desc, group_id, page_id):
for grp in flow_desc.groups:
if grp.id == group_id:
for page in grp.pages:
if page.id == page_id:
return page
raise ObjectDoesNotExist(
_("page '%(group_id)s/%(page_id)s' in flow '%(flow_id)s'") % {
'group_id': group_id,
'page_id': page_id,
'flow_id': flow_id
})
# }}}
# {{{ flow page handling
class ClassNotFoundError(RuntimeError):
    pass

def import_class(name):
components = name.split('.')
if len(components) < 2:
# need at least one module plus class name
raise ClassNotFoundError(name)
module_name = ".".join(components[:-1])
try:
mod = __import__(module_name)
except ImportError:
raise ClassNotFoundError(name)
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
            raise ClassNotFoundError(name)

    return mod
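
# Hypothetical usage sketch: import_class("course.page.Page") imports the
# module "course.page", then resolves the remaining dotted attribute "Page"
# and returns the class object; any failure surfaces as ClassNotFoundError.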
def get_flow_page_class(repo, typename, commit_sha):
# look among default page types
import course.page
try:
return getattr(course.page, typename)
except AttributeError:
pass
# try a global dotted-name import
try:
return import_class(typename)
except ClassNotFoundError:
pass
if typename.startswith("repo:"):
stripped_typename = typename[5:]
        components = stripped_typename.split(".")
        if len(components) != 2:
            raise ClassNotFoundError(
                    _("repo page class must consist of two "
                        "dotted components (invalid: '%s')")
                    % typename)

        module, classname = components
module_name = "code/"+module+".py"
module_code = get_repo_blob(repo, module_name, commit_sha,
allow_tree=False).data
exec(compile(module_code, module_name, 'exec'), module_dict)
try:
return module_dict[classname]
        except KeyError:
raise ClassNotFoundError(typename)
else:
raise ClassNotFoundError(typename)
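
# Resolution order, in outline: built-in page types in course.page are tried
# first, then a global dotted-name import, and finally "repo:" types. A
# hypothetical type string "repo:mypages.MyQuestion" would be loaded from the
# file code/mypages.py at the given commit of the course repository.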
def instantiate_flow_page(location, repo, page_desc, commit_sha):
    class_ = get_flow_page_class(repo, page_desc.type, commit_sha)

    # The first constructor argument is presumably the validation context;
    # passing None skips validation outside the content validator.
    return class_(None, location, page_desc)
# }}}
# {{{ page data wrangling
def _adjust_flow_session_page_data_inner(repo, flow_session,
        course_identifier, flow_desc):
    commit_sha = get_course_commit_sha(
            flow_session.course, flow_session.participation)

    # Assumed key format: a schema-version prefix plus the commit hash, so
    # stale page data is detected on both content and format changes.
    revision_key = "2:"+commit_sha.decode()

    if flow_session.page_data_at_revision_key == revision_key:
        # Page data already matches this revision--nothing to do.
        return
from course.page.base import PageContext
pctx = PageContext(
course=flow_session.course,
repo=repo,
commit_sha=commit_sha,
flow_session=flow_session,
in_sandbox=False,
page_uri=None)
from course.models import FlowPageData
def remove_page(fpd):
if fpd.ordinal is not None:
fpd.ordinal = None
fpd.save()
desc_group_ids = []
    ordinal = [0]  # a mutable cell so the nested helpers below can bump it
for grp in flow_desc.groups:
desc_group_ids.append(grp.id)
shuffle = getattr(grp, "shuffle", False)
max_page_count = getattr(grp, "max_page_count", None)
available_page_ids = [page_desc.id for page_desc in grp.pages]
if max_page_count is None:
max_page_count = len(available_page_ids)
group_pages = []
# {{{ helper functions
def find_page_desc(page_id):
new_page_desc = None
for page_desc in grp.pages:
if page_desc.id == page_id:
new_page_desc = page_desc
break
assert new_page_desc is not None
return new_page_desc
def instantiate_page(page_desc):
return instantiate_flow_page(
"course '%s', flow '%s', page '%s/%s'"
% (course_identifier, flow_session.flow_id,
grp.id, page_desc.id),
repo, page_desc, commit_sha)
def create_fpd(new_page_desc):
        page = instantiate_page(new_page_desc)

        # make_page_data is assumed here: it produces the page's per-session
        # data dictionary stored on the FlowPageData row.
        data = page.make_page_data()
        return FlowPageData(
flow_session=flow_session,
ordinal=None,
page_type=new_page_desc.type,
page_id=new_page_desc.id,
data=data,
title=page.title(pctx, data))
def add_page(fpd):
if fpd.ordinal != ordinal[0]:
fpd.ordinal = ordinal[0]
fpd.save()
page_desc = find_page_desc(fpd.page_id)
page = instantiate_page(page_desc)
title = page.title(pctx, fpd.data)
if fpd.title != title:
fpd.title = title
fpd.save()
ordinal[0] += 1
available_page_ids.remove(fpd.page_id)
group_pages.append(fpd)
# }}}
if shuffle:
# maintain order of existing pages as much as possible
for fpd in (FlowPageData.objects
.filter(
flow_session=flow_session,
group_id=grp.id,
ordinal__isnull=False)
.order_by("ordinal")):
if (fpd.page_id in available_page_ids
and len(group_pages) < max_page_count):
add_page(fpd)
else:
remove_page(fpd)
assert len(group_pages) <= max_page_count
from random import choice
# then add randomly chosen new pages
while len(group_pages) < max_page_count and available_page_ids:
new_page_id = choice(available_page_ids)
new_page_fpds = (FlowPageData.objects
.filter(
flow_session=flow_session,
group_id=grp.id,
page_id=new_page_id))
if new_page_fpds.count():
# We already have FlowPageData for this page, revive it
new_page_fpd, = new_page_fpds
assert new_page_fpd.page_id == new_page_id
else:
# Make a new FlowPageData instance
page_desc = find_page_desc(new_page_id)
assert page_desc.id == new_page_id
new_page_fpd = create_fpd(page_desc)
assert new_page_fpd.page_id == new_page_id
add_page(new_page_fpd)
else:
# reorder pages to order in flow
id_to_fpd = dict(
((fpd.group_id, fpd.page_id), fpd)
for fpd in FlowPageData.objects.filter(
flow_session=flow_session,
group_id=grp.id))
for page_desc in grp.pages:
key = (grp.id, page_desc.id)
if key in id_to_fpd:
fpd = id_to_fpd.pop(key)
else:
fpd = create_fpd(page_desc)
if len(group_pages) < max_page_count:
add_page(fpd)
for fpd in id_to_fpd.values():
remove_page(fpd)
# {{{ remove pages orphaned because of group renames
for fpd in (
FlowPageData.objects
.filter(
flow_session=flow_session,
ordinal__isnull=False)
.exclude(group_id__in=desc_group_ids)
):
remove_page(fpd)
# }}}
flow_session.page_count = ordinal[0]
flow_session.page_data_at_revision_key = revision_key
flow_session.save()
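
# In outline: for each group in the flow description, existing FlowPageData
# rows are renumbered or revived in place, missing pages get fresh rows, and
# rows for pages (or whole groups) no longer in the flow have their ordinal
# cleared rather than being deleted--presumably so stored page data survives
# future flow changes.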
def adjust_flow_session_page_data(repo, flow_session,
course_identifier, flow_desc):
# The atomicity is not done as a decorator above because we can't import
# django.db at the module level here. The relate-validate script wants to
# import this module, and it obviously has no database.
from django.db import transaction
with transaction.atomic():
return _adjust_flow_session_page_data_inner(
repo, flow_session, course_identifier, flow_desc)
def get_course_commit_sha(course, participation):
sha = course.active_git_commit_sha
if participation is not None and participation.preview_git_commit_sha:
sha = participation.preview_git_commit_sha
return sha.encode()
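
# Assumed precedence, for illustration: a participation with
# preview_git_commit_sha set sees content at that commit (e.g. staff
# previewing unpublished material); everyone else sees the course's
# active_git_commit_sha.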
def list_flow_ids(repo, commit_sha):
flow_ids = []
try:
flows_tree = get_repo_blob(repo, "flows", commit_sha)
except ObjectDoesNotExist:
# That's OK--no flows yet.
pass
else:
        for entry in flows_tree.items():
            # strip the ".yml" suffix to recover the flow id
            flow_ids.append(entry.path[:-4])
return sorted(flow_ids)
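
# Sketch of the assumed repo layout: a "flows" tree containing
# flows/quiz1.yml and flows/hw2.yml yields the sorted list ["hw2", "quiz1"].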