ARFA / Commits

Commit 2f5b725a authored Nov 25, 2018 by NARRA SURAJ
updating question stats on eval
parent 4e3b5104
Showing 3 changed files with 44 additions and 9 deletions (+44 -9):

    ARFA/ARFA_app/migrations/0004_auto_20181125_1807.py   +30  -0
    ARFA/ARFA_app/models.py                                 +2  -2
    ARFA/ARFA_app/utilities.py                             +12  -7
ARFA/ARFA_app/migrations/0004_auto_20181125_1807.py (new file, 0 → 100644)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-25 18:07
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ARFA_app', '0003_auto_20181125_1102'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='num_appeared',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='question',
            name='num_correct',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='test',
            name='max_marks',
            field=models.IntegerField(null=True),
        ),
    ]
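This migration file is auto-generated: running python manage.py makemigrations after the models.py change below yields the two AddField operations plus the AlterField that lets Test.max_marks be NULL, and python manage.py migrate applies it. Because both new fields declare default=0, every pre-existing Question row is backfilled with zero counts. A quick post-migration sanity check, assuming the app imports as ARFA_app (a sketch, not part of the commit):

    from ARFA_app.models import Question

    # After migrating, every existing question should start with zeroed counters.
    assert not Question.objects.exclude(num_appeared=0).exists()
    assert not Question.objects.exclude(num_correct=0).exists()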
ARFA/ARFA_app/models.py
...
@@ -86,8 +86,8 @@ class Question(models.Model):
     difficulty_observed = models.CharField(max_length=20)
     question_text = models.CharField(max_length=1000)
     image_link = models.CharField(max_length=200)
-    num_appeared = models.IntegerField
-    num_correct = models.IntegerField
+    num_appeared = models.IntegerField(default=0)
+    num_correct = models.IntegerField(default=0)
     visibility = models.CharField(max_length=20)
     ownership = models.CharField(max_length=20)
...
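Note what the -2/+2 here actually fixes: the old lines assigned the IntegerField class itself rather than an instance, so Django never registered num_appeared or num_correct as model fields, which is why the migration above introduces them with AddField rather than altering them. With the counters instantiated properly, a caller could derive an observed success rate per question; a hypothetical helper, not part of this commit:

    def success_rate(question):
        # Fraction of appearances answered correctly, or None if never shown.
        # float() keeps the division fractional under Python 2, which this
        # codebase targets (print statements, Django 1.11).
        if question.num_appeared == 0:
            return None
        return question.num_correct / float(question.num_appeared)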
ARFA/ARFA_app/utilities.py
...
@@ -17,6 +17,7 @@ from django.views.decorators.cache import cache_control
 import json
 from django.db import connection
 from django.db import IntegrityError
+from django.db.models import F
 from pprint import pprint
 import datetime
...
@@ -48,6 +49,7 @@ def evaluate(responses, otherData):
         marksDict[c.q_ID_id] = [c.marks_pos, c.marks_neg]
     totalMarks = 0
+    correctQList = []  # list of correctly attempted questions
     correctAnswer = None
     for response in responses:
         evalObj = Evaluation()
...
@@ -68,17 +70,12 @@ def evaluate(responses, otherData):
         # evalObj.marks = 1
         responseList = options
-        # print "correctanswer"
-        # print correctAnswer
-        # print correctAnswer.values('pk')
-        # correctAnswer = list(correctAnswer.values('pk'))
-        # correctAnswer = list(map(lambda x: x['pk'], correctAnswer))
         print correctAnswer
         # correct answer
         if len(responseList) == len(correctAnswer) and sorted(responseList) == sorted(correctAnswer):
             evalObj.marks = marksDict[question][0]  # 0 - first in the list, marks_pos
             totalMarks = totalMarks + evalObj.marks
+            correctQList.append(question)  # add to list of correctly attempted questions
         # else, it is a wrong answer since no attempt results in no response
         else:
             evalObj.marks = marksDict[question][1]  # 1 - second in the list, marks_neg
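The check above awards marks_pos only when the response selects exactly the correct option set, with order ignored thanks to sorted(); anything else, including an empty response from an unattempted question, falls through to marks_neg. A standalone illustration with made-up option IDs:

    responseList = ['B', 'A']
    correctAnswer = ['A', 'B']
    # True: same length and same members regardless of order.
    len(responseList) == len(correctAnswer) and sorted(responseList) == sorted(correctAnswer)
    # ['A'] or ['A', 'C'] as the response would make this False, drawing marks_neg.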
...
@@ -90,6 +87,14 @@ def evaluate(responses, otherData):
             print "Integrity error - evaluation not recorded"
             pass
+    # update questions' data
+    # https://docs.djangoproject.com/en/2.1/topics/db/queries/#updating-multiple-objects-at-once
+    models.Question.objects.filter(q_ID__in=QList).update(num_appeared=F('num_appeared') + 1)
+    models.Question.objects.filter(q_ID__in=correctQList).update(num_correct=F('num_correct') + 1)
+    # write takesObj
     takesObj = Takes()
     takesObj.student_id = otherData['username']
     takesObj.test_ID_id = otherData['testID']
...
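The two update() calls are the core of the commit: F('num_appeared') + 1 is resolved by the database itself, so each call issues a single UPDATE ... SET num_appeared = num_appeared + 1 over the filtered rows, and concurrent evaluations cannot overwrite each other's increments (see the Django docs link in the diff). QList, the list of all question IDs that appeared, is evidently built earlier in evaluate(), outside the hunks shown. A minimal standalone sketch of the same pattern, with an illustrative ID list:

    from django.db.models import F
    from ARFA_app import models

    # One SQL UPDATE per call; no read-modify-write race on the counters.
    models.Question.objects.filter(q_ID__in=[1, 2, 3]).update(num_appeared=F('num_appeared') + 1)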