Commit 27707f99
authored Nov 26, 2018 by Rahul-chunduru
merged
parents 7013fd64 440a0a66
Showing 3 changed files with 90 additions and 21 deletions (+90 -21)

ARFA/ARFA_app/migrations/0004_auto_20181125_1807.py  +30 -0
ARFA/ARFA_app/models.py                              +2  -2
ARFA/ARFA_app/utilities.py                           +58 -19
ARFA/ARFA_app/migrations/0004_auto_20181125_1807.py (new file, 0 → 100644)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-25 18:07
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ARFA_app', '0003_auto_20181125_1102'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='num_appeared',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='question',
            name='num_correct',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='test',
            name='max_marks',
            field=models.IntegerField(null=True),
        ),
    ]
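This migration backfills num_appeared and num_correct to 0 on existing Question rows and relaxes Test.max_marks to accept NULL. To preview the generated SQL and then apply the change, a minimal sketch (not part of the commit), assuming DJANGO_SETTINGS_MODULE points at the ARFA project's settings:

# Editorial sketch, not part of this commit.
import django
django.setup()

from django.core.management import call_command

# Print the SQL this migration would run, without touching the database.
call_command('sqlmigrate', 'ARFA_app', '0004_auto_20181125_1807')

# Apply any unapplied ARFA_app migrations, including this one.
call_command('migrate', 'ARFA_app')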
ARFA/ARFA_app/models.py
...
@@ -85,8 +85,8 @@ class Question(models.Model):
    difficulty_set = models.CharField(max_length=20)
    difficulty_observed = models.CharField(max_length=20)
    question_text = models.CharField(max_length=1000)
    image_link = models.CharField(max_length=200)
    num_appeared = models.IntegerField(default=0)
    num_correct = models.IntegerField(default=0)
    visibility = models.CharField(max_length=20)
    ownership = models.CharField(max_length=20)
...
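Together, num_appeared and num_correct are enough to derive an observed correctness rate per question. A minimal sketch of such a helper (hypothetical, not part of the commit), assuming the field names above:

# Hypothetical helper, not part of this commit: empirical fraction of
# appearances on which a question was answered correctly.
def success_rate(question):
    if question.num_appeared == 0:
        return None  # never served in a test, no data yet
    return float(question.num_correct) / question.num_appeared

A rate like this could feed the difficulty_observed field, though the commit itself does not wire that up.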
ARFA/ARFA_app/utilities.py
...
@@ -9,6 +9,7 @@ from .models import Question
from .models import Options
from .models import Student
from .models import Evaluation
from .models import Takes
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
...
@@ -16,25 +17,45 @@ from django.views.decorators.cache import cache_control
import json
from django.db import connection
from django.db import IntegrityError
from django.db.models import F
from pprint import pprint
import datetime


def evaluate(responses, otherData):
    """responses contains a list of dicts of the form {q_ID: [option1, option2 ..]}
    other data contains any other data necessary for evaluation such as testID and studentID"""
    #create evaluation object
    evalObj = Evaluation()
    if len(responses) == 0:
        return
    #debugging:
    print "responses:"
    print responses
    evalObj.student_id = otherData['username']
    evalObj.test_ID_id = int(otherData['testID'])
    #list of question ids
    print "QList"
    QList = [int(question) for [(question, _)] in [response.items() for response in responses]]
    print QList
    #select Contains objects for which test_ID=testID, q_ID belongs to list QList
    contains = models.Contains.objects.filter(test_ID=otherData['testID'], q_ID__in=QList)
    #put these in a dictionary {q_ID:[marks_pos, marks_neg]}
    marksDict = {}
    for c in contains:
        marksDict[c.q_ID_id] = [c.marks_pos, c.marks_neg]
    totalMarks = 0
    evalObj.q_ID = None
    correctQList = []  #list of correctly attempted questions
    correctAnswer = None
    for response in responses:
        evalObj = Evaluation()
        evalObj.student_id = otherData['username']
        evalObj.test_ID_id = int(otherData['testID'])
        evalObj.q_ID = None
        (question, options), = response.items()
        question = int(question)
        # options = responses[question]
...
@@ -49,24 +70,42 @@ def evaluate(responses, otherData):
        # evalObj.marks = 1
        responseList = options
        # print "correctanswer"
        # print correctAnswer
        # print correctAnswer.values('pk')
        # correctAnswer = list(correctAnswer.values('pk'))
        # correctAnswer = list(map(lambda x: x['pk'], correctAnswer))
        print correctAnswer
        if len(responseList) == len(correctAnswer) and sorted(responseList) == sorted(correctAnswer):
            evalObj.marks = marksDict[question][0]  #0 - first in the list, marks_pos
            totalMarks = totalMarks + evalObj.marks
            correctQList.append(question)  #add to list of correctly attempted questions
        #else, it is a wrong answer since no attempt results in no response
        else:
            evalObj.marks = marksDict[question][1]  #1 - second in the list, marks_neg
            totalMarks = totalMarks + evalObj.marks
        try:
            evalObj.save()
        except IntegrityError as e:
            print "Integrity error - evaluation not recorded"
            pass
    #update questions' data
    #https://docs.djangoproject.com/en/2.1/topics/db/queries/#updating-multiple-objects-at-once
    models.Question.objects.filter(q_ID__in=QList).update(num_appeared=F('num_appeared') + 1)
    models.Question.objects.filter(q_ID__in=correctQList).update(num_correct=F('num_correct') + 1)
    #write takesObj
    takesObj = Takes()
    takesObj.student_id = otherData['username']
    takesObj.test_ID_id = otherData['testID']
    takesObj.time_stamp = datetime.datetime.now()
    takesObj.total_marks = totalMarks
    try:
        takesObj.save()
    except IntegrityError as e:
        print "Integrity error in Takes - not recorded"
        pass


def getOptions(question):  #returns queryset
    return models.Options.objects.get(q_ID=question)
...
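Per its docstring, evaluate() expects responses as a list of single-key dicts mapping a question ID to the list of selected options, and otherData to carry the 'username' and 'testID' keys it reads. A minimal call sketch (illustrative values, not from the commit):

# Illustrative call, not from the commit; IDs and options are made up.
responses = [
    {'12': [3]},     # question 12 answered with option 3
    {'15': [1, 4]},  # question 15 with two options selected
]
otherData = {'username': 'alice', 'testID': '7'}
evaluate(responses, otherData)

One design choice worth noting: the counter updates use F('num_appeared') + 1 and F('num_correct') + 1, which push each increment into a single database UPDATE, so concurrent submissions do not lose counts the way a Python-side read-modify-write would.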