if hasattr(response, "stdout") and response.stdout:
    bulk_feedback_bits.append("".join([
        "<p>",
        _("Your code printed the following output"),
        ":"
        "<pre>%s</pre></p>"])
        % escape(response.stdout))
if hasattr(response, "stderr") and response.stderr:
bulk_feedback_bits.append("".join([
"<p>",
_("Your code printed the following error messages"),
":"
"<pre>%s</pre></p>"]) % escape(response.stderr))
if hasattr(response, "figures") and response.figures:
fig_lines = ["".join([
"<p>",
_("Your code produced the following plots"),
":</p>"]),
'<dl class="result-figure-list">',
]
for nr, mime_type, b64data in response.figures:
if mime_type in ["image/jpeg", "image/png"]:
fig_lines.extend([
"".join([
"<dt>",
_("Figure"), "%d<dt>"]) % nr,
'<dd><img alt="Figure %d" src="data:%s;base64,%s"></dd>'
% (nr, mime_type, b64data)])
fig_lines.append("</dl>")
bulk_feedback_bits.extend(fig_lines)
if hasattr(response, "html") and response.html:
if (page_context.course is None
or not page_context.course.trusted_for_markup):
bulk_feedback_bits.extend(
sanitize_from_code_html(snippet)
for snippet in response.html)
else:
bulk_feedback_bits.extend(response.html)
return AnswerFeedback(
correctness=correctness,
feedback="\n".join(feedback_bits),
bulk_feedback="\n".join(bulk_feedback_bits))
def correct_answer(self, page_context, page_data, answer_data, grade_data):
result = ""
if hasattr(self.page_desc, "correct_code_explanation"):
result += markup_to_html(
page_context,
self.page_desc.correct_code_explanation)
if hasattr(self.page_desc, "correct_code"):
result += ("".join([
_("The following code is a valid answer"),
": <pre>%s</pre>"])
% escape(self.page_desc.correct_code))
def normalized_answer(self, page_context, page_data, answer_data):
if answer_data is None:
return None
normalized_answer = self.get_code_from_answer_data(answer_data)
from django.utils.html import escape
return f"<pre>{escape(normalized_answer)}</pre>"
def normalized_bytes_answer(self, page_context, page_data, answer_data):
return (self.suffix, self.get_code_from_answer_data(answer_data).encode("utf-8"))
# {{{ python code question
class PythonCodeQuestion(CodeQuestion, PageBaseWithoutHumanGrading):
"""
An auto-graded question allowing an answer consisting of Python code.
All user code as well as all code specified as part of the problem
is in Python 3.
Example:
.. code-block:: yaml
type: PythonCodeQuestion
id: addition
access_rules:
add_permissions:
- change_answer
value: 1
timeout: 10
prompt: |
# Adding two numbers in Python
Your code will receive two variables, *a* and *b*. Compute their sum and
assign it to *c*.
setup_code: |
import random
a = random.uniform(-10, 10)
b = random.uniform(-10, 10)
names_for_user: [a, b]
correct_code: |
c = a + b
names_from_user: [c]
test_code: |
if not isinstance(c, float):
feedback.finish(0, "Your computed c is not a float.")
correct_c = a + b
rel_err = abs(correct_c-c)/abs(correct_c)
if rel_err < 1e-7:
feedback.finish(1, "Your computed c was correct.")
else:
feedback.finish(0, "Your computed c was incorrect.")
If you are not including the
:attr:`course.constants.flow_permission.change_answer`
permission for your entire flow, you likely want to
include this snippet in your question definition:
.. code-block:: yaml
access_rules:
add_permissions:
- change_answer
This will allow participants multiple attempts at getting
the right answer.
.. attribute:: id
|id-page-attr|
.. attribute:: type
``PythonCodeQuestion``
.. attribute:: is_optional_page
|is-optional-page-attr|
.. attribute:: access_rules
|access-rules-page-attr|
.. attribute:: title
|title-page-attr|
.. attribute:: value
|value-page-attr|
.. attribute:: prompt
The page's prompt, written in :ref:`markup`.
.. attribute:: timeout
A number, giving the number of seconds for which setup code,
the given answer code, and the test code (combined) will be
allowed to run.
.. attribute:: setup_code
Optional.
Python code to prepare the environment for the participant's
answer.
.. attribute:: show_setup_code
Optional. ``True`` or ``False``. If true, the :attr:`setup_code`
will be shown to the participant.
.. attribute:: names_for_user
Optional.
Symbols defined at the end of the :attr:`setup_code` that will be
made available to the participant's code.
A deep copy (using the standard library function :func:`copy.deepcopy`)
of these values is made, to prevent the user from modifying trusted
state of the grading code.
.. attribute:: names_from_user
Optional.
Symbols that the participant's code is expected to define.
These will be made available to the :attr:`test_code`.
.. attribute:: test_code
Optional.
Code that will be run to determine the correctness of a
student-provided solution. Will have access to the variables named in
:attr:`names_from_user` (each of which will be *None* if the
participant's code did not provide it). Should never raise an exception.
This may contain the marker "###CORRECT_CODE###", which will
be replaced with the contents of :attr:`correct_code`, with
each line indented to the same depth as where the marker
is found. The line with this marker is only allowed to have
white space and the marker on it.
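For instance, a hypothetical ``test_code`` (a sketch reusing the
addition example above; the helper name is made up) could run the
correct code inside a function to obtain a reference value:

.. code-block:: yaml

    test_code: |
        def compute_reference():
            ###CORRECT_CODE###
            return c

        feedback.check_scalar("c", compute_reference(), c)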
.. attribute:: show_test_code
Optional. ``True`` or ``False``. If true, the :attr:`test_code`
will be shown to the participant.
.. attribute:: correct_code_explanation
Optional.
Code that is revealed when answers are visible
(see :ref:`flow-permissions`). This is shown before
:attr:`correct_code` as an explanation.
.. attribute:: correct_code
Optional.
Code that is revealed when answers are visible
(see :ref:`flow-permissions`).
.. attribute:: initial_code
Optional.
Code present in the code input field when the participant first starts
working on their solution.
.. attribute:: data_files
Optional.
A list of file names in the :ref:`git-repo` whose contents will be made
available to :attr:`setup_code` and :attr:`test_code` through the
``data_files`` dictionary. (see below)
.. attribute:: single_submission
Optional, a Boolean. If the question does not allow multiple submissions
based on its :attr:`access_rules` (not the ones of the flow), a warning
is shown. Setting this attribute to True will silence the warning.
The following symbols are available in :attr:`setup_code` and :attr:`test_code`:
* ``GradingComplete``: An exception class that can be raised to indicate
that the grading code has concluded.
* ``feedback``: A class instance with the following interface::
feedback.set_points(0.5) # 0<=points<=1 (usually)
feedback.add_feedback("This was wrong")
# combines the above two and raises GradingComplete
feedback.finish(0, "This was wrong")
feedback.check_numpy_array_sanity(name, num_axes, data)
feedback.check_numpy_array_features(name, ref, data, report_failure=True)
feedback.check_numpy_array_allclose(name, ref, data,
accuracy_critical=True, rtol=1e-5, atol=1e-8,
report_success=True, report_failure=True)
# If report_failure is True, this function will only return
# if *data* passes the tests. It will return *True* in this
# case.
#
# If report_failure is False, this function will always return,
# and the return value will indicate whether *data* passed the
# accuracy/shape/kind checks.
feedback.check_list(name, ref, data, entry_type=None)
feedback.check_scalar(name, ref, data, accuracy_critical=True,
rtol=1e-5, atol=1e-8, report_success=True, report_failure=True)
# returns True if accurate
feedback.call_user(f, *args, **kwargs)
# Calls a user-supplied function and prints an appropriate
# feedback message in case of failure.
* ``data_files``: A dictionary mapping file names from :attr:`data_files`
to :class:`bytes` instances with that file's contents.
* ``user_code``: The user code being tested, as a string.
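For instance, a hypothetical ``setup_code`` fragment (a sketch; the file
name ``points.csv`` is made up for illustration) might decode and parse
one of these files::

    import csv
    import io

    # Entries in data_files are bytes; decode before parsing.
    rows = list(csv.reader(io.StringIO(
        data_files["points.csv"].decode("utf-8"))))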
"""
@property
def language_mode(self):
return "python"
@property
def container_image(self):
return settings.RELATE_DOCKER_RUNPY_IMAGE
@property
def suffix(self):
return ".py"
def __init__(self, vctx, location, page_desc, language_mode="python"):
super().__init__(vctx, location, page_desc,
language_mode=language_mode)
# }}}
# {{{ python code question with human feedback
class PythonCodeQuestionWithHumanTextFeedback(
PageBaseWithHumanTextFeedback, PythonCodeQuestion):
"""
A question allowing an answer consisting of Python code.
This page type allows both automatic grading and grading
by a human grader.
If you are not including the
:attr:`course.constants.flow_permission.change_answer`
permission for your entire flow, you likely want to
include this snippet in your question definition:
.. code-block:: yaml
access_rules:
add_permissions:
- change_answer
This will allow participants multiple attempts at getting
the right answer.
Besides those defined in :class:`PythonCodeQuestion`, the
following additional attributes are allowed or required:
Supports automatic computation of point values from textual feedback.
See :ref:`points-from-feedback`.
.. attribute:: human_feedback_value
A number. The point value of the feedback component
by the human grader (who will grade on a 0-100 scale,
which is scaled to yield :attr:`human_feedback_value`
at 100).
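For example, with ``value: 4`` and ``human_feedback_value: 1``, a
human grade of 50 out of 100 contributes 0.5 points to the overall
score.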
.. attribute:: human_feedback_percentage
Optional.
A number. The percentage of the overall grade that is determined
by the human grader's feedback. Exactly one of this attribute and
:attr:`human_feedback_value` must be specified.
.. attribute:: rubric
Required.
The grading guideline for this question (for the human-graded component
of the question), in :ref:`markup`.
"""
def __init__(self, vctx, location, page_desc):
super().__init__(
vctx, location, page_desc)
if (
hasattr(self.page_desc, "human_feedback_value")
and hasattr(self.page_desc, "human_feedback_percentage")):
raise ValidationError(
string_concat(
"%(location)s: ",
_("'human_feedback_value' and "
"'human_feedback_percentage' are not "
"allowed to coexist"))
)
if not (hasattr(self.page_desc, "human_feedback_value")
or hasattr(self.page_desc, "human_feedback_percentage")):
raise ValidationError(
string_concat(
"%(location)s: ",
_("expecting either 'human_feedback_value' "
"or 'human_feedback_percentage', found neither."))
)
if hasattr(self.page_desc, "human_feedback_value"):
vctx.add_warning(
location,
_("Used deprecated 'human_feedback_value' attribute--"
"use 'human_feedback_percentage' instead."))
if self.page_desc.value == 0:
raise ValidationError("".join([
"%s: ",
_("'human_feedback_value' attribute is not allowed "
"if value of question is 0, use "
"'human_feedback_percentage' instead")])
% location)
if self.page_desc.human_feedback_value > self.page_desc.value:
raise ValidationError("".join([
"%s: ",
_("human_feedback_value greater than overall "
"value of question")])
% location)
if hasattr(self.page_desc, "human_feedback_percentage"):
if not (
0 <= self.page_desc.human_feedback_percentage <= 100):
raise ValidationError("".join([
"%s: ",
_("the value of human_feedback_percentage "
"must be between 0 and 100")])
% location)
if hasattr(self.page_desc, "human_feedback_value"):
    # Convert the deprecated absolute point value to a percentage.
    self.human_feedback_percentage = (
        self.page_desc.human_feedback_value * 100 / self.page_desc.value)
else:
    self.human_feedback_percentage = (
        self.page_desc.human_feedback_percentage)
def required_attrs(self):
return (
*super().required_attrs(),
# value is otherwise optional, but we require it here
("value", (int, float)),
)
def allowed_attrs(self):
    return (
        *super().allowed_attrs(),
        ("human_feedback_value", (int, float)),
        ("human_feedback_percentage", (int, float)))
def human_feedback_point_value(self, page_context, page_data):
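# E.g., value: 5 with human_feedback_percentage: 40 yields 2 human-graded points.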
return self.page_desc.value * self.human_feedback_percentage / 100
def grade(self, page_context, page_data, answer_data, grade_data):
if answer_data is None:
return AnswerFeedback(correctness=0,
    feedback=_("No answer provided."))
if grade_data is not None and not grade_data["released"]:
grade_data = None
code_feedback = PythonCodeQuestion.grade(self, page_context,
page_data, answer_data, grade_data)
human_points = self.human_feedback_point_value(page_context, page_data)
code_points = self.page_desc.value - human_points
correctness = None
percentage = None
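# Blend the autograded and human-graded components, each weighted by
# its share (code share = 100 - human_feedback_percentage).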
if (code_feedback is not None
and code_feedback.correctness is not None
and grade_data is not None
and grade_data["grade_percent"] is not None):
code_feedback_percentage = 100 - self.human_feedback_percentage
percentage = (
code_feedback.correctness * code_feedback_percentage
+ grade_data["grade_percent"] / 100
* self.human_feedback_percentage
)
correctness = percentage / 100
elif (self.human_feedback_percentage == 100
and grade_data is not None
and grade_data["grade_percent"] is not None):
correctness = grade_data["grade_percent"] / 100
percentage = correctness * 100
elif (self.human_feedback_percentage == 0
and code_feedback.correctness is not None):
correctness = code_feedback.correctness
percentage = correctness * 100
human_feedback_text = None
human_feedback_points = None
# grade_data may have been set to None above (unreleased grade).
if grade_data is not None:
    assert grade_data["feedback_text"] is not None
    if grade_data["feedback_text"].strip():
        human_feedback_text = markup_to_html(
            page_context, grade_data["feedback_text"])

    human_graded_percentage = grade_data["grade_percent"]
    if human_graded_percentage is not None:
        human_feedback_points = (human_graded_percentage/100.
            * human_points)
code_feedback_points = None
if (code_feedback is not None
and code_feedback.correctness is not None):
code_feedback_points = code_feedback.correctness*code_points
from django.template.loader import render_to_string
feedback = render_to_string(
"course/feedback-code-with-human.html",
{
"percentage": percentage,
"code_feedback": code_feedback,
"code_feedback_points": code_feedback_points,
"code_points": code_points,
"human_feedback_text": human_feedback_text,
"human_feedback_points": human_feedback_points,
"human_points": human_points,
})
return AnswerFeedback(
correctness=correctness,
feedback=feedback,
bulk_feedback=code_feedback.bulk_feedback)
# vim: foldmethod=marker