Commit 16051a1c authored by shreyansh's avatar shreyansh

added essay evaluator

parent a2c4c811
name: Softlab
channels:
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- ca-certificates=2019.10.16=0
- certifi=2019.9.11=py37_0
- libedit=3.1.20181209=hc058e9b_0
- libffi=3.2.1=hd88cf55_4
- libgcc-ng=9.1.0=hdf63c60_0
- libstdcxx-ng=9.1.0=hdf63c60_0
- ncurses=6.1=he6710b0_1
- openssl=1.1.1d=h7b6447c_3
- pip=19.3.1=py37_0
- python=3.7.4=h265db76_1
- readline=7.0=h7b6447c_5
- setuptools=41.4.0=py37_0
- sqlite=3.30.1=h7b6447c_0
- tk=8.6.8=hbc83047_0
- wheel=0.33.6=py37_0
- xz=5.2.4=h14c3975_4
- zlib=1.2.11=h7b6447c_3
- pip:
- absl-py==0.8.1
- astor==0.8.0
- boto==2.49.0
- boto3==1.10.4
- botocore==1.13.4
- chardet==3.0.4
- docutils==0.15.2
- gast==0.2.2
- gensim==3.8.1
- google-pasta==0.1.7
- grpcio==1.24.3
- h5py==2.10.0
- idna==2.8
- jmespath==0.9.4
- keras-applications==1.0.8
- keras-preprocessing==1.1.0
- markdown==3.1.1
- nltk==3.4.5
- numpy==1.17.3
- opt-einsum==3.1.0
- protobuf==3.10.0
- python-dateutil==2.8.0
- requests==2.22.0
- s3transfer==0.2.1
- scipy==1.3.1
- six==1.12.0
- smart-open==1.8.4
- tensorboard==2.0.0
- tensorflow==2.0.0
- tensorflow-estimator==2.0.1
- termcolor==1.1.0
- urllib3==1.25.6
- werkzeug==0.16.0
- wrapt==1.11.2
prefix: /home/shreyansh/.conda/envs/Softlab
......@@ -27,7 +27,28 @@ app.get('/', (req, res) => {
});
app.post('/evaluate', (req, res) => {
  // Score the posted essay by spawning the Python predictor and sending
  // back whatever score it prints on stdout.
  //
  // Fixes vs. previous version:
  //  - removed the premature `res.send('Hello world\n')`: responding twice
  //    throws ERR_HTTP_HEADERS_SENT when the promise later resolves;
  //  - `data` from the child streams is already a Buffer, so decode it
  //    directly with `toString()` instead of `new Buffer.from(data, 'base64')`
  //    (which invoked the factory as a constructor and mislabeled the
  //    encoding);
  //  - on failure the client now gets a 500 instead of a request that
  //    hangs forever.
  const essay = req.body.essay;
  new Promise(function (resolve, reject) {
    const { spawn } = require('child_process');
    const predict = spawn('python3', ['prediction.py', essay]);
    predict.stdout.on('data', function (data) {
      // prediction.py prints the rounded score as plain text.
      resolve(data.toString('ascii'));
    });
    predict.stderr.on('data', (data) => {
      reject(data.toString('ascii'));
    });
  })
    .then((marks) => {
      res.send(marks);
    })
    .catch((err) => {
      console.log(err);
      res.status(500).send('essay evaluation failed\n');
    });
});
......
......@@ -9,7 +9,8 @@
},
"dependencies": {
"express": "^4.16.1",
"request": "^2.88.0",
"googleapis": "^44.0.0"
"googleapis": "^44.0.0",
"plagiarism-checker": "^1.2.1",
"request": "^2.88.0"
}
}
var request = require('request');
var CopyleaksCloud = require('plagiarism-checker');
var _ = require('lodash');

var clCloud = new CopyleaksCloud();
var config = clCloud.getConfig();

// SECURITY: the Copyleaks account credentials were committed to version
// control. Read them from the environment instead; the old literals are
// kept only as a fallback so existing setups keep working, and the
// committed API key must be rotated.
var credentials = {
  "Email": process.env.COPYLEAKS_EMAIL || "sjain0615@gmail.com",
  "ApiKey": process.env.COPYLEAKS_API_KEY || "89893811-8da7-432b-8f40-7d4eb6456693"
};

let access_token;
var essay = 'yes i am on moonlight. yes i am on moonlight yes .i am on moonlight yes i am on moonlightyes i am on moonlight. yes i am on moonlight. yes i am on moonlight';

clCloud.login(credentials.Email, credentials.ApiKey, 'education', function (resp, err) {
  if (err) {
    // Previously login errors were silently ignored; at least log them.
    console.log('Copyleaks login failed:', err);
    return;
  }
  access_token = _.get(resp, 'access_token', '');
  console.log(resp);

  var _customHeaders = {};
  // Sandbox mode - scan without consuming any credits and get back dummy results.
  _customHeaders[config.SANDBOX_MODE_HEADER] = true;
  // Callback url - for fast testing of callbacks we recommend http://requestb.in
  _customHeaders[config.HTTP_CALLBACK] = 'http://requestb.in/callbacks/';

  clCloud.createByText(essay, _customHeaders, function (resp, err) {
    console.log(resp);
    if (!(resp && resp.ProcessId)) {
      return;
    }
    let PID = resp.ProcessId;
    // NOTE(review): the status is queried exactly once; a robust client
    // should poll until it reaches 'Finished'. The dead, empty
    // setTimeout(...) calls and the stray `15` argument that used to sit
    // in these callbacks have been removed - they did nothing.
    clCloud.getProcessStatus(PID, function (resp, err) {
      console.log(resp.Status, resp);
      if (resp.Status === 'Finished') {
        clCloud.getProcessResults(PID, function (resp, err) {
          console.log(resp);
        });
      } else {
        clCloud.getProcessResults(PID, function (resp, err) {
          console.log(resp.results);
        });
      }
    });
  });
});
// var pid = '4decf855-60d6-4b9e-985b-4bfcfdb6bd63'
// function setToken(){
// var headers = {
// "Content-type": "application/json"
// };
// var options = {
// url: "https://id.copyleaks.com/v1/account/login-api",
// method: "POST",
// headers: headers,
// body: dataString,
// json: true
// };
// return new Promise(function(resolve,reject){
// clCloud.login(options, function(error, response, body) {
// if (!error && response.statusCode == 200)
// resolve(body.access_token)
// else
// reject(error)
// })
// })
// }
// function scanFile(token, essay){
// var headers = {
// 'Authorization': `Bearer ${token}`,
// 'Content-type': "application/json"
// };
// var options = {
// url: "https://id.copyleaks.com/v2/education/4d7728fb-ed3e-4c77-8a2c-07783cfad046/result",
// method: "GET",
// headers: headers,
// body: essay
// };
// request(options,function(error, response, body) {
// if (!error && response.statusCode == 200) {
// console.log(body);
// }
// });
// }
// setToken()
// .then((token)=>{
// scanFile(token,essay)
// })
// .catch((err)=>console.log('err '+err))
import os
import re
import sys
import nltk
import warnings
import numpy as np
from tensorflow import keras
from nltk.corpus import stopwords
from gensim.models import KeyedVectors
# nltk.download('stopwords')
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def tokenizeEssay(essay):
    """Tokenize an essay into lowercase word tokens without stopwords.

    Parameters
    ----------
    essay : str
        Raw essay text; every non-alphabetic character is replaced by a
        space before splitting.

    Returns
    -------
    list of str
        Lowercase tokens that are not English (NLTK) stopwords.
    """
    essay = re.sub("[^a-zA-Z]", " ", essay)
    words = essay.lower().split()
    stops = set(stopwords.words("english"))
    # `w not in stops` is the idiomatic form of the old `not w in stops`.
    return [w for w in words if w not in stops]
def makeFeatureVec(words, model, num_features):
    """Average the embedding vectors of ``words`` into one feature vector.

    Parameters
    ----------
    words : iterable of str
        Tokenized essay.
    model : object
        Word-vector model exposing ``index2word`` (vocabulary list) and
        item access returning a length-``num_features`` vector
        (e.g. a gensim ``KeyedVectors``).
    num_features : int
        Embedding dimensionality.

    Returns
    -------
    numpy.ndarray
        float32 vector of shape ``(num_features,)``: the mean of the
        vectors of all in-vocabulary words, or all zeros when no word is
        in the vocabulary. (The previous version divided by zero in that
        case and returned a NaN vector.)
    """
    featureVec = np.zeros((num_features,), dtype="float32")
    num_words = 0
    index2word_set = set(model.index2word)
    for word in words:
        if word in index2word_set:
            num_words += 1
            featureVec = np.add(featureVec, model[word])
    # Guard against an essay made entirely of out-of-vocabulary words.
    if num_words > 0:
        featureVec = np.divide(featureVec, np.float32(num_words))
    return featureVec
def getAvgFeatureVecs(essays, model, num_features):
    """Build the feature matrix for a batch of tokenized essays.

    Returns a float32 array of shape ``(len(essays), num_features)``
    where row ``i`` is the averaged word vector of ``essays[i]`` as
    computed by :func:`makeFeatureVec`.
    """
    essayFeatureVecs = np.zeros((len(essays), num_features), dtype="float32")
    # enumerate() replaces the hand-maintained counter of the original.
    for row, essay in enumerate(essays):
        essayFeatureVecs[row] = makeFeatureVec(essay, model, num_features)
    return essayFeatureVecs
# --- Script entry point: score the essay passed on the command line. ---

# Pretrained word2vec embeddings (binary format) used to vectorize the essay.
model = KeyedVectors.load_word2vec_format('models/word2vecmodel.bin', binary=True)

essay = sys.argv[1]
essay_tokens = [tokenizeEssay(essay)]

# Shape (1, 300) -> (1, 1, 300): the LSTM expects (batch, timesteps, features).
testEssayVec = np.array(getAvgFeatureVecs(essay_tokens, model, 300))
testEssayVec = np.reshape(testEssayVec, (testEssayVec.shape[0], 1, testEssayVec.shape[1]))

# Trained LSTM evaluator; print the rounded score for the Node caller to read.
evaluator = keras.models.load_model('models/final_lstm.h5')
y_pred = np.around(evaluator.predict(testEssayVec))
print(y_pred.reshape(-1)[0])
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment