summaryrefslogtreecommitdiff
path: root/gnowsys-ndf/gnowsys_ndf
diff options
context:
space:
mode:
Diffstat (limited to 'gnowsys-ndf/gnowsys_ndf')
-rw-r--r--gnowsys-ndf/gnowsys_ndf/benchmarker/benchmarkreport.py4
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/models.py22
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/templatetags/ndf_tags.py28
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/urls/user.py1
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py68
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/e-library.py2
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/file.py22
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/group.py34
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/methods.py109
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py314
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/page.py24
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/person.py15
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/search_views.py51
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/topics.py15
-rw-r--r--gnowsys-ndf/gnowsys_ndf/ndf/views/userDashboard.py43
15 files changed, 526 insertions, 226 deletions
diff --git a/gnowsys-ndf/gnowsys_ndf/benchmarker/benchmarkreport.py b/gnowsys-ndf/gnowsys_ndf/benchmarker/benchmarkreport.py
index 9fa9c01..d25eebe 100644
--- a/gnowsys-ndf/gnowsys_ndf/benchmarker/benchmarkreport.py
+++ b/gnowsys-ndf/gnowsys_ndf/benchmarker/benchmarkreport.py
@@ -10,13 +10,13 @@ import json
'''
db = get_database()
col = db[Benchmark.collection_name]
-
+
def report(request):
date1=datetime.date.today()
ti=time(0,0)
listofmethods = []
Today=datetime.datetime.combine(date1,ti)
- bench_cur = col.find({'last_update':{'$gte':Today}}).sort('last_update', -1).sort('time_taken',-1)
+ bench_cur = col.find({'last_update':{'$gte':Today}}).sort('last_update', -1)
search_cur = []
if request.method == "POST":
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/models.py b/gnowsys-ndf/gnowsys_ndf/ndf/models.py
index 37b8894..bb69cda 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/models.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/models.py
@@ -235,7 +235,16 @@ class Node(DjangoDocument):
# ready.
default_values = {'created_at': datetime.datetime.utcnow, 'status': u'DRAFT'}
use_dot_notation = True
-
+"""
+ indexes=[
+ {
+ 'fields':['_type','name'],
+ },
+ {
+ 'fields':['member_of','group_set'],
+ },
+ ]
+"""
########## Setter(@x.setter) & Getter(@property) ##########
@property
@@ -1560,7 +1569,16 @@ class Triple(DjangoDocument):
'lang': basestring, # Put validation for standard language codes
'status': STATUS_CHOICES_TU
}
-
+ """
+ indexes=[
+ {
+ 'fields':['_type','name'],
+ },
+ {
+ 'fields':['_type','subject'],
+ }
+ ]
+"""
required_fields = ['name', 'subject']
use_dot_notation = True
use_autorefs = True
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/templatetags/ndf_tags.py b/gnowsys-ndf/gnowsys_ndf/ndf/templatetags/ndf_tags.py
index 29f1c99..8be58d3 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/templatetags/ndf_tags.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/templatetags/ndf_tags.py
@@ -5,6 +5,7 @@ from collections import OrderedDict
from time import time
import json
import ox
+import multiprocessing as mp
''' -- imports from installed packages -- '''
from django.contrib.auth.models import User
@@ -772,11 +773,10 @@ def thread_reply_count( oid ):
global_thread_latest_reply["content_org"] = each.content_org
global_thread_latest_reply["last_update"] = each.last_update
global_thread_latest_reply["user"] = User.objects.get(pk=each.created_by).username
- else:
- if global_thread_latest_reply["last_update"] < each.last_update:
- global_thread_latest_reply["content_org"] = each.content_org
- global_thread_latest_reply["last_update"] = each.last_update
- global_thread_latest_reply["user"] = User.objects.get(pk=each.created_by).username
+ elif global_thread_latest_reply["last_update"] < each.last_update:
+ global_thread_latest_reply["content_org"] = each.content_org
+ global_thread_latest_reply["last_update"] = each.last_update
+ global_thread_latest_reply["user"] = User.objects.get(pk=each.created_by).username
thread_reply_count(each._id)
@@ -1206,10 +1206,10 @@ def get_prior_node(node_id):
if topic_GST._id in obj.member_of:
if obj.prior_node:
- for each in obj.prior_node:
- node = node_collection.one({'_id': ObjectId(each) })
- prior.append(( node._id , node.name ))
-
+ # for each in obj.prior_node:
+# node = node_collection.one({'_id': ObjectId(each) })
+ # prior.append(( node._id , node.name ))
+ prior=[(n._id,n.name) for n in (node_collection.one({'_id': ObjectId(each) }) for each in obj.prior_node)]
return prior
return prior
@@ -2107,8 +2107,10 @@ def get_source_id(obj_id):
@get_execution_time
def get_translation_relation(obj_id, translation_list = [], r_list = []):
get_translation_rt = node_collection.one({'$and':[{'_type':'RelationType'},{'name':u"translation_of"}]})
+ r_list_append_temp=r_list.append #a temp. variable which stores the lookup for append method
+ translation_list_append_temp=translation_list.append#a temp. variable which stores the lookup
if obj_id not in r_list:
- r_list.append(obj_id)
+ r_list_append_temp(obj_id)
node_sub_rt = triple_collection.find({'$and':[{'_type':"GRelation"},{'relation_type.$id':get_translation_rt._id},{'subject':obj_id}]})
node_rightsub_rt = triple_collection.find({'$and':[{'_type':"GRelation"},{'relation_type.$id':get_translation_rt._id},{'right_subject':obj_id}]})
@@ -2117,20 +2119,20 @@ def get_translation_relation(obj_id, translation_list = [], r_list = []):
for each in list(node_sub_rt):
right_subject = node_collection.one({'_id':each.right_subject})
if right_subject._id not in r_list:
- r_list.append(right_subject._id)
+ r_list_append_temp(right_subject._id)
if list(node_rightsub_rt):
node_rightsub_rt.rewind()
for each in list(node_rightsub_rt):
right_subject = node_collection.one({'_id':each.subject})
if right_subject._id not in r_list:
- r_list.append(right_subject._id)
+ r_list_append_temp(right_subject._id)
if r_list:
r_list.remove(obj_id)
for each in r_list:
dic={}
node = node_collection.one({'_id':each})
dic[node._id]=node.language
- translation_list.append(dic)
+ translation_list_append_temp(dic)
get_translation_relation(each,translation_list, r_list)
return translation_list
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/urls/user.py b/gnowsys-ndf/gnowsys_ndf/ndf/urls/user.py
index 103025a..a916345 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/urls/user.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/urls/user.py
@@ -8,4 +8,3 @@ urlpatterns = patterns('gnowsys_ndf.ndf.views.userDashboard',
url(r'^useractivity', 'user_activity', name='user_activity'),
url(r'^user_preference/(?P<auth_id>[\w-]+)$','user_preferences',name='user_preferences'),
)
-
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py
index 9ef16e0..0c3b742 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py
@@ -7,7 +7,7 @@ import time
import ast
import json
import math
-import multiprocessing
+import multiprocessing
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
@@ -637,7 +637,7 @@ def get_topic_contents(request, group_id):
def get_collection_list(collection_list, node):
inner_list = []
error_list = []
-
+ inner_list_append_temp=inner_list.append #a temp. variable which stores the lookup for append method
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
@@ -651,9 +651,9 @@ def get_collection_list(collection_list, node):
inner_sub_list = get_collection_list(inner_sub_list, col_obj)
if inner_sub_list:
- inner_list.append(inner_sub_list[0])
+ inner_list_append_temp(inner_sub_list[0])
else:
- inner_list.append(inner_sub_dict)
+ inner_list_append_temp(inner_sub_dict)
cl.update({'children': inner_list })
else:
@@ -723,7 +723,7 @@ def get_tree_hierarchy(request, group_id, node_id):
def get_inner_collection(collection_list, node):
inner_list = []
error_list = []
-
+ inner_list_append_temp=inner_list.append #a temp. variable which stores the lookup for append method
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
@@ -736,9 +736,9 @@ def get_inner_collection(collection_list, node):
inner_sub_list = get_inner_collection(inner_sub_list, col_obj)
if inner_sub_list:
- inner_list.append(inner_sub_list[0])
+ inner_list_append_temp(inner_sub_list[0])
else:
- inner_list.append(inner_sub_dict)
+ inner_list_append_temp(inner_sub_dict)
cl.update({'children': inner_list })
else:
@@ -750,27 +750,43 @@ def get_inner_collection(collection_list, node):
else:
return collection_list
+
@get_execution_time
def get_collection(request, group_id, node_id):
node = node_collection.one({'_id':ObjectId(node_id)})
# print "\nnode: ",node.name,"\n"
collection_list = []
-
- if node:
- if node.collection_set:
- for each in node.collection_set:
- obj = node_collection.one({'_id': ObjectId(each) })
- if obj:
- node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
- collection_list.append({'name': obj.name, 'id': obj.pk,'node_type': node_type})
- collection_list = get_inner_collection(collection_list, obj)
-
-
+ collection_list_append_temp=collection_list.append
+
+ # def a(p,q,r):
+# collection_list.append({'name': p, 'id': q,'node_type': r})
+ #this empty list will have the Process objects as its elements
+ processes=[]
+ #Function used by Processes implemented below
+ def multi_(lst):
+ for each in lst:
+ obj = node_collection.one({'_id': ObjectId(each) })
+ if obj:
+ node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
+ collection_list_append_temp({'name':obj.name,'id':obj.pk,'node_type':node_type})
+ collection_list = get_inner_collection(collection_list, obj)
+ #collection_list.append({'name':obj.name,'id':obj.pk,'node_type':node_type})
+
+
+ if node and node.collection_set:
+ t=len(node.collection_set)
+ x=multiprocessing.cpu_count()#returns no of cores in the cpu
+n2=-(-t//x)#ceiling division so the remainder chunk of the list is not silently dropped
+#NOTE(review): Process objects created below; appends made inside child processes do NOT propagate to the parent's collection_list — verify this design actually returns data
+for i in range(x):
+processes.append(multiprocessing.Process(target=multi_,args=(node.collection_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes[i].start()#each Process started
+ for i in range(x):
+ processes[i].join()#each Process converges
data = collection_list
return HttpResponse(json.dumps(data))
-# ###End of manipulating nodes collection####
-
@get_execution_time
def add_sub_themes(request, group_id):
if request.is_ajax() and request.method == "POST":
@@ -3068,9 +3084,8 @@ def get_districts(request, group_id):
}).sort('name', 1)
if cur_districts.count():
- for d in cur_districts:
- districts.append([str(d.subject), d.name.split(" -- ")[0]])
-
+ #loop replaced by a list comprehension
+ districts=[[str(d.subject), d.name.split(" -- ")[0]] for d in cur_districts]
else:
error_message = "No districts found"
raise Exception(error_message)
@@ -5402,6 +5417,9 @@ def page_scroll(request,group_id,page):
page='1'
if int(page) != int(tot_page) and int(page) != int(1):
page=int(page)+1
+ # temp. variables which stores the lookup for append method
+ user_activity_append_temp=user_activity.append
+ files_list_append_temp=files_list.append
for each in (paged_resources.page(int(page))).object_list:
if each.created_by == each.modified_by :
if each.last_update == each.created_at:
@@ -5412,9 +5430,9 @@ def page_scroll(request,group_id,page):
activity = 'created'
if each._type == 'Group':
- user_activity.append(each)
+ user_activity_append_temp(each)
each.update({'activity':activity})
- files_list.append(each)
+ files_list_append_temp(each)
else:
page=0
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/e-library.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/e-library.py
index 56ffac3..eb20bc3 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/e-library.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/e-library.py
@@ -100,6 +100,7 @@ def resource_list(request, group_id, app_id=None, page_no=1):
educationaluse_stats = {}
+
if files:
eu_list = [] # count
@@ -119,7 +120,6 @@ def resource_list(request, group_id, app_id=None, page_no=1):
# print educationaluse_stats
result_paginated_cur = files
result_pages = paginator.Paginator(result_paginated_cur, page_no, no_of_objs_pp)
-
datavisual.append({"name":"Doc", "count": educationaluse_stats.get("Documents", 0)})
datavisual.append({"name":"Image","count": educationaluse_stats.get("Images", 0)})
datavisual.append({"name":"Video","count": educationaluse_stats.get("Videos", 0)})
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/file.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/file.py
index 0ca7ece..2633129 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/file.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/file.py
@@ -84,7 +84,6 @@ def file(request, group_id, file_id=None, page_no=1):
shelves = []
shelf_list = {}
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
-
# if auth:
# has_shelf_RT = node_collection.one({'_type': 'RelationType', 'name': u'has_shelf' })
# dbref_has_shelf = has_shelf_RT.get_dbref()
@@ -691,6 +690,8 @@ def submitDoc(request, group_id):
group_name, group_id = get_group_name_id(group_id)
alreadyUploadedFiles = []
+ #a temp. variable which stores the lookup for append method
+ alreadyUploadedFiles_append_temp=alreadyUploadedFiles.append
str1 = ''
img_type=""
topic_file = ""
@@ -726,7 +727,7 @@ def submitDoc(request, group_id):
# if not obj_id_instance.is_valid(f):
# check if file is already uploaded file
if isinstance(f, list):
- alreadyUploadedFiles.append(f)
+ alreadyUploadedFiles_append_temp(f)
title = mtitle
# str1 = alreadyUploadedFiles
@@ -1245,16 +1246,19 @@ def file_detail(request, group_id, _id):
if auth:
has_shelf_RT = node_collection.one({'_type': 'RelationType', 'name': u'has_shelf' })
shelf = triple_collection.find({'_type': 'GRelation', 'subject': ObjectId(auth._id), 'relation_type.$id': has_shelf_RT._id })
-
+ #a temp. variable which stores the lookup for append method
+ shelves_append_temp=shelves.append
if shelf:
for each in shelf:
shelf_name = node_collection.one({'_id': ObjectId(each.right_subject)})
- shelves.append(shelf_name)
+ shelves_append_temp(shelf_name)
- shelf_list[shelf_name.name] = []
+ shelf_list[shelf_name.name] = []
+ #a temp. variable which stores the lookup for append method
+ shelf_list_shelfname_append_temp=shelf_list[shelf_name.name].append
for ID in shelf_name.collection_set:
shelf_item = node_collection.one({'_id': ObjectId(ID) })
- shelf_list[shelf_name.name].append(shelf_item.name)
+ shelf_list_shelfname_append_temp(shelf_item.name)
else:
shelves = []
@@ -1282,10 +1286,8 @@ def getFileThumbnail(request, group_id, _id):
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
- else:
- auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
- if auth:
- group_id = str(auth._id)
+ elif auth:
+ group_id = str(auth._id)
else:
pass
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/group.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/group.py
index 97a6915..c2297b6 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/group.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/group.py
@@ -1165,8 +1165,8 @@ def group(request, group_id, app_id=None, agency_type=None):
}).sort('last_update', -1)
if cur_groups_user.count():
- for group in cur_groups_user:
- group_nodes.append(group)
+ #loop replaced by a list comprehension
+ group_nodes=[group for group in cur_groups_user]
group_count = cur_groups_user.count()
@@ -1184,9 +1184,8 @@ def group(request, group_id, app_id=None, agency_type=None):
}).sort('last_update', -1)
if cur_public.count():
- for group in cur_public:
- group_nodes.append(group)
-
+ #loop replaced by a list comprehension
+ group_nodes=[group for group in cur_public]
group_count = cur_public.count()
return render_to_response("ndf/group.html",
@@ -1399,14 +1398,18 @@ def group_dashboard(request, group_id=None):
shelf_list = {}
if shelf:
+ #a temp. variable which stores the lookup for append method
+ shelves_append_temp=shelves.append
for each in shelf:
shelf_name = node_collection.one({'_id': ObjectId(each.right_subject)})
- shelves.append(shelf_name)
+ shelves_append_temp(shelf_name)
shelf_list[shelf_name.name] = []
+ #a temp. variable which stores the lookup for append method
+ shelf_lst_shelfname_append=shelf_list[shelf_name.name].append
for ID in shelf_name.collection_set:
shelf_item = node_collection.one({'_id': ObjectId(ID) })
- shelf_list[shelf_name.name].append(shelf_item.name)
+ shelf_lst_shelfname_append(shelf_item.name)
else:
shelves = []
@@ -1615,7 +1618,8 @@ def switch_group(request,group_id,node_id):
resource_exists = False
resource_exists_in_grps = []
response_dict = {'success': False, 'message': ""}
-
+ #a temp. variable which stores the lookup for append method
+ resource_exists_in_grps_append_temp=resource_exists_in_grps.append
new_grps_list_distinct = [ObjectId(item) for item in new_grps_list if ObjectId(item) not in existing_grps]
if new_grps_list_distinct:
for each_new_grp in new_grps_list_distinct:
@@ -1623,7 +1627,7 @@ def switch_group(request,group_id,node_id):
grp = node_collection.find({'name': node.name, "group_set": ObjectId(each_new_grp), "member_of":ObjectId(node.member_of[0])})
if grp.count() > 0:
resource_exists = True
- resource_exists_in_grps.append(unicode(each_new_grp))
+ resource_exists_in_grps_append_temp(unicode(each_new_grp))
response_dict["resource_exists_in_grps"] = resource_exists_in_grps
@@ -1649,11 +1653,15 @@ def switch_group(request,group_id,node_id):
data_list = []
user_id = request.user.id
all_user_groups = []
- for each in get_all_user_groups():
- all_user_groups.append(each.name)
+ # for each in get_all_user_groups():
+ # all_user_groups.append(each.name)
+ #loop replaced by a list comprehension
+ all_user_groups=[each.name for each in get_all_user_groups()]
st = node_collection.find({'$and': [{'_type': 'Group'}, {'author_set': {'$in':[user_id]}},{'name':{'$nin':all_user_groups}}]})
- for each in node.group_set:
- coll_obj_list.append(node_collection.one({'_id': each}))
+ # for each in node.group_set:
+ # coll_obj_list.append(node_collection.one({'_id': each}))
+ #loop replaced by a list comprehension
+ coll_obj_list=[node_collection.one({'_id': each}) for each in node.group_set ]
data_list = set_drawer_widget(st, coll_obj_list)
return HttpResponse(json.dumps(data_list))
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/methods.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/methods.py
index 2f983e2..ba06250 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/methods.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/methods.py
@@ -34,11 +34,11 @@ import ast
import string
import json
import locale
+import multiprocessing as mp
from datetime import datetime, timedelta, date
# import csv
# from collections import Counter
from collections import OrderedDict
-
col = db[Benchmark.collection_name]
history_manager = HistoryManager()
@@ -227,12 +227,28 @@ def get_gapps(default_gapp_listing=False, already_selected_gapps=[]):
# Then append their names in list of GApps to be excluded
if already_selected_gapps:
gapps_list_remove = gapps_list.remove
- for each_gapp in already_selected_gapps:
+ #Function used by Processes implemented below
+ def multi_(lst):
+ for each_gapp in lst:
gapp_name = each_gapp["name"]
if gapp_name in gapps_list:
gapps_list_remove(gapp_name)
-
+ #this empty list will have the Process objects as its elements
+ processes=[]
+ n1=len(already_selected_gapps)
+ lst1=already_selected_gapps
+ #returns no of cores in the cpu
+ x=mp.cpu_count()
+ #divides the list into those many parts
+n2=-(-n1//x)
+#NOTE(review): removals performed inside child processes do NOT propagate to the parent's gapps_list — verify this design actually filters the list
+for i in range(x):
+processes.append(mp.Process(target=multi_,args=(lst1[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes[i].start() #each Process started
+ for i in range(x):
+ processes[i].join() #each Process converges
# Find all GAPPs
meta_type = node_collection.one({
"_type": "MetaType", "name": META_TYPE[0]
@@ -487,10 +503,8 @@ def get_drawers(group_id, nid=None, nlist=[], page_no=1, checked=None, **kwargs)
for each in drawer:
if each._id not in nlist:
dict1[each._id] = each
-
- for oid in nlist:
- obj = node_collection.one({'_id': oid})
- dict2.append(obj)
+ #loop replaced by a list comprehension
+ dict2=[node_collection.one({'_id': oid}) for oid in nlist]
dict_drawer['1'] = dict1
dict_drawer['2'] = dict2
@@ -501,10 +515,8 @@ def get_drawers(group_id, nid=None, nlist=[], page_no=1, checked=None, **kwargs)
if each._id != nid:
if each._id not in nlist:
dict1[each._id] = each
-
- for oid in nlist:
- obj = node_collection.one({'_id': oid})
- dict2.append(obj)
+ #loop replaced by a list comprehension
+ dict2=[node_collection.one({'_id': oid}) for oid in nlist]
dict_drawer['1'] = dict1
dict_drawer['2'] = dict2
@@ -746,7 +758,6 @@ def get_node_common_fields(request, node, group_id, node_type, coll_set=None):
node.tags = tags_list
is_changed = True
-
# Build collection, prior node, teaches and assesses lists
if check_collection:
changed = build_collection(node, check_collection, right_drawer_list, checked)
@@ -1030,7 +1041,7 @@ def build_collection(node, check_collection, right_drawer_list, checked):
else:
return False
-
+"""
@get_execution_time
def get_versioned_page(node):
rcs = RCS()
@@ -1048,7 +1059,7 @@ def get_versioned_page(node):
if line.find('status')!=-1:
up_ind=line.find('status')
if line.find(('PUBLISHED'),up_ind) !=-1:
- rev_no=rev_no.split()[0]
+ rev_no=rev_no.split()[0]
node=history_manager.get_version_document(node,rev_no)
proc1.kill()
return (node,rev_no)
@@ -1056,7 +1067,33 @@ def get_versioned_page(node):
node=history_manager.get_version_document(node,'1.1')
proc1.kill()
return(node,'1.1')
+"""
+@get_execution_time
+def get_versioned_page(node):
+ rcs = RCS()
+ fp = history_manager.get_file_path(node)
+ cmd= 'rlog %s' % \
+ (fp)
+ rev_no =""
+ proc1=subprocess.Popen(cmd,shell=True,
+ stdout=subprocess.PIPE)
+ for line in iter(proc1.stdout.readline,b''):
+ if line.find('revision')!=-1 and line.find('selected') == -1:
+ rev_no=string.split(line,'revision')
+ rev_no=rev_no[1].strip( '\t\n\r')
+ rev_no=rev_no.split()[0]
+ if line.find('status')!=-1:
+ up_ind=line.find('status')
+ if line.find(('PUBLISHED'),up_ind) !=-1:
+ rev_no=rev_no.split()[0]
+ node=history_manager.get_version_document(node,rev_no)
+ proc1.kill()
+ return (node,rev_no)
+ if rev_no == '1.1':
+ node=history_manager.get_version_document(node,'1.1')
+ proc1.kill()
+ return(node,'1.1')
@get_execution_time
@@ -1191,8 +1228,8 @@ def tag_info(request, group_id, tagname=None):
cur = node_collection.find({'tags': {'$regex': tagname, '$options': "i"},
'group_set':ObjectId(group_id)
})
- for every in cur:
- search_result.append(every)
+ #loop replaced by a list comprehension
+ search_result=[every for every in cur]
# Autheticate user can see all public files
elif request.user.is_authenticated():
@@ -1214,8 +1251,8 @@ def tag_info(request, group_id, tagname=None):
{'created_by': userid},
]
})
- for every in cur:
- search_result.append(every)
+ #loop replaced by a list comprehension
+ search_result=[every for every in cur]
else: # Unauthenticated user can see all public files.
group_node = node_collection.one({'_id': ObjectId(group_id)})
@@ -1226,8 +1263,8 @@ def tag_info(request, group_id, tagname=None):
'status': u'PUBLISHED'
}
)
- for every in cur:
- search_result.append(every)
+ #loop replaced by a list comprehension
+ search_result=[every for every in cur]
if search_result:
total = len(search_result)
@@ -1457,18 +1494,22 @@ def get_widget_built_up_data(at_rt_objectid_or_attr_name_list, node, type_of_set
"""
if not isinstance(at_rt_objectid_or_attr_name_list, list):
at_rt_objectid_or_attr_name_list = [at_rt_objectid_or_attr_name_list]
-
+ #a temp. variable which stores the lookup for append method
+ type_of_set_append_temp=type_of_set.append
if not type_of_set:
node["property_order"] = []
+ #a temp. variable which stores the lookup for append method
+ node_property_order_append_temp=node["property_order"].append
gst_nodes = node_collection.find({'_type': "GSystemType", '_id': {'$in': node["member_of"]}}, {'type_of': 1, 'property_order': 1})
for gst in gst_nodes:
for type_of in gst["type_of"]:
if type_of not in type_of_set:
- type_of_set.append(type_of)
+ type_of_set_append_temp(type_of)
for po in gst["property_order"]:
if po not in node["property_order"]:
- node["property_order"].append(po)
+ node_property_order_append_temp(po)
+
BASE_FIELD_METADATA = {
'name': {'name': "name", '_type': "BaseField", 'altnames': "Name", 'required': True},
@@ -1480,6 +1521,8 @@ def get_widget_built_up_data(at_rt_objectid_or_attr_name_list, node, type_of_set
}
widget_data_list = []
+ #a temp. variable which stores the lookup for append method
+ widget_data_list_append_temp=widget_data_list.append
for at_rt_objectid_or_attr_name in at_rt_objectid_or_attr_name_list:
if type(at_rt_objectid_or_attr_name) == ObjectId: #ObjectId.is_valid(at_rt_objectid_or_attr_name):
# For attribute-field(s) and/or relation-field(s)
@@ -1539,7 +1582,7 @@ def get_widget_built_up_data(at_rt_objectid_or_attr_name_list, node, type_of_set
data_type = node.structure[field.name]
value = node[field.name]
- widget_data_list.append({ '_type': field._type, # It's only use on details-view template; overridden in ndf_tags html_widget()
+ widget_data_list_append_temp({ '_type': field._type, # It's only use on details-view template; overridden in ndf_tags html_widget()
'_id': field._id,
'data_type': data_type,
'name': field.name, 'altnames': altnames,
@@ -1550,7 +1593,7 @@ def get_widget_built_up_data(at_rt_objectid_or_attr_name_list, node, type_of_set
# For node's base-field(s)
# widget_data_list.append([node['member_of'], BASE_FIELD_METADATA[at_rt_objectid_or_attr_name], node[at_rt_objectid_or_attr_name]])
- widget_data_list.append({ '_type': BASE_FIELD_METADATA[at_rt_objectid_or_attr_name]['_type'],
+ widget_data_list_append_temp({ '_type': BASE_FIELD_METADATA[at_rt_objectid_or_attr_name]['_type'],
'data_type': node.structure[at_rt_objectid_or_attr_name],
'name': at_rt_objectid_or_attr_name, 'altnames': BASE_FIELD_METADATA[at_rt_objectid_or_attr_name]['altnames'],
'value': node[at_rt_objectid_or_attr_name],
@@ -1578,21 +1621,25 @@ def get_property_order_with_value(node):
demo["property_order"] = []
type_of_set = []
+ #temp. variables which stores the lookup for append method
+ type_of_set_append_temp=type_of_set.append
+ demo_prop_append_temp=demo["property_order"].append
gst_nodes = node_collection.find({'_type': "GSystemType", '_id': {'$in': demo["member_of"]}}, {'type_of': 1, 'property_order': 1})
for gst in gst_nodes:
for type_of in gst["type_of"]:
if type_of not in type_of_set:
- type_of_set.append(type_of)
+ type_of_set_append_temp(type_of)
for po in gst["property_order"]:
if po not in demo["property_order"]:
- demo["property_order"].append(po)
+ demo_prop_append_temp(po)
demo.get_neighbourhood(node["member_of"])
-
+ #a temp. variable which stores the lookup for append method
+ new_property_order_append_temp=new_property_order.append
for tab_name, list_field_id_or_name in demo['property_order']:
list_field_set = get_widget_built_up_data(list_field_id_or_name, demo, type_of_set)
- new_property_order.append([tab_name, list_field_set])
+ new_property_order_append_temp([tab_name, list_field_set])
demo["property_order"] = new_property_order
@@ -1603,9 +1650,11 @@ def get_property_order_with_value(node):
if type_of_nodes.count():
demo["property_order"] = []
+ #a temp. variable which stores the lookup for append method
+ demo_prop_append_temp=demo["property_order"].append
for to in type_of_nodes:
for po in to["property_order"]:
- demo["property_order"].append(po)
+ demo_prop_append_temp(po)
node_collection.collection.update({'_id': demo._id}, {'$set': {'property_order': demo["property_order"]}}, upsert=False, multi=False)
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py
index 3b4e149..ccee2be 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py
@@ -3,6 +3,7 @@ import os
import ast
# from datetime import datetime
import datetime
+import multiprocessing as mp
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
@@ -96,8 +97,13 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
- for eachset in agency_type_node.collection_set:
- app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+ #b=app_collection_set.append
+ #for eachset in agency_type_node.collection_set:
+ # b(a({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+
+ # loop replaced by a list comprehension
+ app_collection_set=[node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}) for eachset in agency_type_node.collection_set]
+
# for eachset in app.collection_set:
# app_collection_set.append(node_collection.one({"_id":eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
@@ -196,18 +202,24 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
systemtype_relationtype_set = []
system = node_collection.find_one({"_id":ObjectId(app_set_instance_id)})
systemtype = node_collection.find_one({"_id":ObjectId(app_set_id)})
- for each in systemtype.attribute_type_set:
- systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type})
- for each in systemtype.relation_type_set:
- systemtype_relationtype_set.append({"rt_name":each.name,"type_id":str(each._id)})
-
+ #for each in systemtype.attribute_type_set:
+ # systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type})
+ #loop replaced by a list comprehension
+ systemtype_attributetype_set=[{"type":each.name,"type_id":str(each._id),"value":each.data_type} for each in systemtype.attribute_type_set]
+ #for each in systemtype.relation_type_set:
+ # systemtype_relationtype_set.append({"rt_name":each.name,"type_id":str(each._id)})
+ #loop replaced by a list comprehension
+ systemtype_relationtype_set=[{"rt_name":each.name,"type_id":str(each._id)} for each in systemtype.relation_type_set]
+ #temp. variables which stores the lookup for append method
+ atlist_append_temp=atlist.append
+ rtlist_append_temp=rtlist.append
for eachatset in systemtype_attributetype_set :
for eachattribute in triple_collection.find({"_type":"GAttribute", "subject":system._id, "attribute_type.$id":ObjectId(eachatset["type_id"])}):
- atlist.append({"type":eachatset["type"],"type_id":eachatset["type_id"],"value":eachattribute.object_value})
+ atlist_append_temp({"type":eachatset["type"],"type_id":eachatset["type_id"],"value":eachattribute.object_value})
for eachrtset in systemtype_relationtype_set :
for eachrelation in triple_collection.find({"_type":"GRelation", "subject":system._id, "relation_type.$id":ObjectId(eachrtset["type_id"])}):
right_subject = node_collection.find_one({"_id":ObjectId(eachrelation.right_subject)})
- rtlist.append({"type":eachrtset["rt_name"],"type_id":eachrtset["type_id"],"value_name": right_subject.name,"value_id":str(right_subject._id)})
+ rtlist_append_temp({"type":eachrtset["rt_name"],"type_id":eachrtset["type_id"],"value_name": right_subject.name,"value_id":str(right_subject._id)})
# To support consistent view
@@ -215,10 +227,10 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
system.get_neighbourhood(systemtype._id)
# array of dict for events ---------------------
-
+ #a temp. variable which stores the lookup for append method
+ events_arr_append_temp=events_arr.append
# if system.has_key('organiser_of_event') and len(system.organiser_of_event): # gives list of events
if 'organiser_of_event' in system and len(system.organiser_of_event): # gives list of events
-
for event in system.organiser_of_event:
event.get_neighbourhood(event.member_of)
@@ -236,7 +248,7 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
dt = event.end_time.strftime('%m/%d/%Y %H:%M')
tempdict['end'] = dt
tempdict['id'] = str(event._id)
- events_arr.append(tempdict)
+ events_arr_append_temp(tempdict)
# elif system.has_key('event_organised_by'): # gives list of colleges/host of events
elif 'event_organised_by' in system: # gives list of colleges/host of events
@@ -257,24 +269,27 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
tempdict['end'] = dt
tempdict['id'] = str(host._id)
- events_arr.append(tempdict)
+ events_arr_append_temp(tempdict)
# print json.dumps(events_arr)
# END --- array of dict for events ---------------------
-
+ #a temp. variable which stores the lookup for append method
+ property_display_order_append_temp=property_display_order.append
for tab_name, fields_order in property_order:
display_fields = []
+ #a temp. variable which stores the lookup for append method
+ display_fields_append_temp=display_fields.append
for field, altname in fields_order:
if system.structure[field] == bool:
- display_fields.append((altname, ("Yes" if system[field] else "No")))
+ display_fields_append_temp((altname, ("Yes" if system[field] else "No")))
elif not system[field]:
- display_fields.append((altname, system[field]))
+ display_fields_append_temp((altname, system[field]))
continue
elif system.structure[field] == datetime.datetime:
- display_fields.append((altname, system[field].date()))
+ display_fields_append_temp((altname, system[field].date()))
elif type(system.structure[field]) == list:
if system[field]:
@@ -282,19 +297,20 @@ def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance
name_list = []
for right_sub_dict in system[field]:
name_list.append(right_sub_dict.name)
- display_fields.append((altname, ", ".join(name_list)))
+ display_fields_append_temp((altname, ", ".join(name_list)))
elif system.structure[field][0] == datetime.datetime:
date_list = []
- for dt in system[field]:
- date_list.append(dt.strftime("%d/%m/%Y"))
- display_fields.append((altname, ", ".join(date_list)))
+ #for dt in system[field]:
+ # date_list.append(dt.strftime("%d/%m/%Y"))
+ date_list=[dt.strftime("%d/%m/%Y") for dt in system[field]]
+ display_fields_append_temp((altname, ", ".join(date_list)))
else:
- display_fields.append((altname, ", ".join(system[field])))
+ display_fields_append_temp((altname, ", ".join(system[field])))
else:
- display_fields.append((altname, system[field]))
+ display_fields_append_temp((altname, system[field]))
- property_display_order.append((tab_name, display_fields))
+ property_display_order_append_temp((tab_name, display_fields))
# End of code
@@ -396,9 +412,11 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
- for eachset in agency_type_node.collection_set:
- app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+ # for eachset in agency_type_node.collection_set:
+ # app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+ #loop replaced by a list comprehension
+ app_collection_set=[node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}) for eachset in agency_type_node.collection_set]
# for eachset in app.collection_set:
# app_collection_set.append(node_collection.one({"_id":eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
# app_set = node_collection.find_one({"_id":eachset})
@@ -434,16 +452,22 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
systemtype = node_collection.find_one({"_id":ObjectId(app_set_id)})
systemtype_name = systemtype.name
title = systemtype_name + " - new"
- for each in systemtype.attribute_type_set:
- systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type, 'sub_values': each.complex_data_type, 'altnames': each.altnames})
+ # for each in systemtype.attribute_type_set:
+ # systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type, 'sub_values': each.complex_data_type, 'altnames': each.altnames})
+
+ #loop replaced by a list comprehension
+ systemtype_attributetype_set=[{"type":each.name,"type_id":str(each._id),"value":each.data_type, 'sub_values': each.complex_data_type, 'altnames': each.altnames} for each in systemtype.attribute_type_set]
+ #a temp. variable which stores the lookup for append method
+ sys_type_relation_set_append= systemtype_relationtype_set.append
for eachrt in systemtype.relation_type_set:
# object_type = [ {"name":rtot.name, "id":str(rtot._id)} for rtot in node_collection.find({'member_of': {'$all': [ node_collection.find_one({"_id":eachrt.object_type[0]})._id]}}) ]
object_type_cur = node_collection.find({'member_of': {'$in': eachrt.object_type}})
object_type = []
- for each in object_type_cur:
- object_type.append({"name":each.name, "id":str(each._id)})
- systemtype_relationtype_set.append({"rt_name": eachrt.name, "type_id": str(eachrt._id), "object_type": object_type})
+ # for each in object_type_cur:
+ # object_type.append({"name":each.name, "id":str(each._id)})
+ object_type=[{"name":each.name, "id":str(each._id)} for each in object_type_cur]
+ sys_type_relation_set_append({"rt_name": eachrt.name, "type_id": str(eachrt._id), "object_type": object_type})
request_at_dict = {}
request_rt_dict = {}
@@ -455,28 +479,60 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
node_id = node_collection.one({'name':each,'_type':'GSystemType'})._id
if node_id in app.type_of:
File = 'True'
-
+
if app_set_instance_id : # at and rt set editing instance
- system = node_collection.find_one({"_id":ObjectId(app_set_instance_id)})
- for eachatset in systemtype_attributetype_set :
- eachattribute = triple_collection.find_one({"_type":"GAttribute", "subject":system._id, "attribute_type.$id":ObjectId(eachatset["type_id"])})
- if eachattribute :
- eachatset['database_value'] = eachattribute.object_value
- eachatset['database_id'] = str(eachattribute._id)
- else :
- eachatset['database_value'] = ""
- eachatset['database_id'] = ""
- for eachrtset in systemtype_relationtype_set :
- eachrelation = triple_collection.find_one({"_type":"GRelation", "subject":system._id, "relation_type.$id":ObjectId(eachrtset["type_id"])})
- if eachrelation:
- right_subject = node_collection.find_one({"_id":ObjectId(eachrelation.right_subject)})
- eachrtset['database_id'] = str(eachrelation._id)
- eachrtset["database_value"] = right_subject.name
- eachrtset["database_value_id"] = str(right_subject._id)
- else :
- eachrtset['database_id'] = ""
- eachrtset["database_value"] = ""
- eachrtset["database_value_id"] = ""
+ system = node_collection.find_one({"_id":ObjectId(app_set_instance_id)})
+ #Function used by Processes implemented below
+ def multi_(lst):
+ for eachatset in lst:
+ eachattribute=triple_collection.find_one({"_type":"GAttribute", "subject":system._id, "attribute_type.$id":ObjectId(eachatset["type_id"])})
+ if eachattribute :
+ eachatset['database_value'] = eachattribute.object_value
+ eachatset['database_id'] = str(eachattribute._id)
+ else :
+ eachatset['database_value'] = ""
+ eachatset['database_id'] = ""
+ #this empty list will have the Process objects as its elements
+ processes=[]
+ #returns no of cores in the cpu
+ x=mp.cpu_count()
+ n1=len(systemtype_attributetype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes.append(mp.Process(target=multi_,args=(systemtype_attributetype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes[i].start()#each Process started
+ for i in range(x):
+ processes[i].join()#each Process converges
+
+ #Function used by Processes implemented below
+ def multi_2(lst):
+ for eachrtset in lst:
+ eachrelation = triple_collection.find_one({"_type":"GRelation", "subject":system._id, "relation_type.$id":ObjectId(eachrtset["type_id"])})
+ if eachrelation:
+ right_subject = node_collection.find_one({"_id":ObjectId(eachrelation.right_subject)})
+ eachrtset['database_id'] = str(eachrelation._id)
+ eachrtset["database_value"] = right_subject.name
+ eachrtset["database_value_id"] = str(right_subject._id)
+ else :
+ eachrtset['database_id'] = ""
+ eachrtset["database_value"] = ""
+ eachrtset["database_value_id"] = ""
+ #this empty list will have the Process objects as its elements
+ processes2=[]
+ n1=len(systemtype_relationtype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes2.append(mp.Process(target=multi_2,args=(systemtype_relationtype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes2[i].start()#each Process started
+ for i in range(x):
+ processes2[i].join()#each Process converges
+
tags = ",".join(system.tags)
content_org = system.content_org
@@ -495,13 +551,40 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
map_geojson_data = request.POST.get('map-geojson-data') # getting markers
user_last_visited_location = request.POST.get('last_visited_location') # getting last visited location by user
file1 = request.FILES.get('file', '')
-
- for each in systemtype_attributetype_set:
- if request.POST.get(each["type_id"],"") :
- request_at_dict[each["type_id"]] = request.POST.get(each["type_id"],"")
- for eachrtset in systemtype_relationtype_set:
- if request.POST.get(eachrtset["type_id"],""):
- request_rt_dict[eachrtset["type_id"]] = request.POST.get(eachrtset["type_id"],"")
+ #Function used by Processes implemented below
+ def multi_3(lst):
+ for each in lst:
+ if request.POST.get(each["type_id"],"") :
+ request_at_dict[each["type_id"]] = request.POST.get(each["type_id"],"")
+ #this empty list will have the Process objects as its elements
+ processes3=[]
+ n1=len(systemtype_attributetype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes3.append(mp.Process(target=multi_3,args=(systemtype_attributetype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes3[i].start()#each Process started
+ for i in range(x):
+ processes3[i].join()#each Process converges
+ #Function used by Processes implemented below
+ def multi_4(lst):
+ for eachrtset in lst:
+ if request.POST.get(eachrtset["type_id"],""):
+ request_rt_dict[eachrtset["type_id"]] = request.POST.get(eachrtset["type_id"],"")
+ #this empty list will have the Process objects as its elements
+ processes4=[]
+ n1=len(systemtype_relationtype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes4.append(mp.Process(target=multi_4,args=(systemtype_relationtype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes4[i].start()#each Process started
+ for i in range(x):
+ processes4[i].join() #each Process converges
if File == 'True':
if file1:
@@ -565,15 +648,26 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
newgsystem.save()
if not app_set_instance_id:
- for key, value in request_at_dict.items():
- attributetype_key = node_collection.find_one({"_id":ObjectId(key)})
- ga_node = create_gattribute(newgsystem._id, attributetype_key, value)
- # newattribute = triple_collection.collection.GAttribute()
- # newattribute.subject = newgsystem._id
- # newattribute.attribute_type = attributetype_key
- # newattribute.object_value = value
- # newattribute.save()
- for key, value in request_rt_dict.items():
+ #Function used by Processes implemented below
+ def multi_5(lst):
+ for key,value in lst:
+ attributetype_key = node_collection.find_one({"_id":ObjectId(key)})
+ ga_node = create_gattribute(newgsystem._id, attributetype_key, value)
+ #this empty list will have the Process objects as its elements
+ processes5=[]
+ lst11=request_at_dict.items()
+ n1=len(lst11)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+                processes5.append(mp.Process(target=multi_5,args=(lst11[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes5[i].start()#each Process started
+ for i in range(x):
+ processes5[i].join()#each Process converges
+ """
+ for key, value in request_rt_dict.items():
if key:
relationtype_key = node_collection.find_one({"_id": ObjectId(key)})
if value:
@@ -584,9 +678,33 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
# newrelation.relation_type = relationtype_key
# newrelation.right_subject = right_subject._id
# newrelation.save()
+ """
+ def multi_6(lst):#Function used by Processes implemented below
+ for key,value in lst:
+ if key:
+ relationtype_key = node_collection.find_one({"_id": ObjectId(key)})
+ if value:
+ right_subject = node_collection.find_one({"_id": ObjectId(value)})
+ gr_node = create_grelation(newgsystem._id, relationtype_key, right_subject._id)
+
+ #this empty list will have the Process objects as its elements
+ processes6=[]
+ lst12=request_rt_dict.items()
+ n1=len(lst12)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes6.append(mp.Process(target=multi_6,args=(lst12[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes6[i].start()#each Process started
+ for i in range(x):
+ processes6[i].join()#each Process converges
+
if app_set_instance_id:
# editing instance
+ """
for each in systemtype_attributetype_set:
if each["database_id"]:
attribute_instance = triple_collection.find_one({"_id": ObjectId(each['database_id'])})
@@ -602,7 +720,38 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
# newattribute.object_value = request.POST.get(each["type_id"],"")
# newattribute.save()
ga_node = create_gattribute(newgsystem._id, attributetype_key, request.POST.get(each["type_id"],""))
-
+ """
+ def multi_7(lst):#Function used by Processes implemented below
+ for each in lst:
+ if each["database_id"]:
+ attribute_instance = triple_collection.find_one({"_id": ObjectId(each['database_id'])})
+ attribute_instance.object_value = request.POST.get(each["database_id"],"")
+ # attribute_instance.save()
+ ga_node = create_gattribute(attribute_instance.subject, attribute_instance.attribute_type, attribute_instance.object_value)
+ else:
+ if request.POST.get(each["type_id"], ""):
+ attributetype_key = node_collection.find_one({"_id":ObjectId(each["type_id"])})
+ # newattribute = triple_collection.collection.GAttribute()
+ # newattribute.subject = newgsystem._id
+ # newattribute.attribute_type = attributetype_key
+ # newattribute.object_value = request.POST.get(each["type_id"],"")
+ # newattribute.save()
+ ga_node = create_gattribute(newgsystem._id, attributetype_key, request.POST.get(each["type_id"],""))
+ #this empty list will have the Process objects as its elements
+ processes7=[]
+ n1=len(systemtype_attributetype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes7.append(mp.Process(target=multi_7,args=(systemtype_attributetype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes7[i].start()#each Process started
+ for i in range(x):
+ processes7[i].join()#each Process converges
+
+
+ """
for eachrt in systemtype_relationtype_set:
if eachrt["database_id"]:
relation_instance = triple_collection.find_one({"_id":ObjectId(eachrt['database_id'])})
@@ -619,7 +768,32 @@ def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance
# newrelation.relation_type = relationtype_key
# newrelation.right_subject = right_subject._id
# newrelation.save()
-
+ """
+ def multi_8(lst):#Function used by Processes implemented below
+ for eachrt in lst:
+ if eachrt["database_id"]:
+ relation_instance = triple_collection.find_one({"_id":ObjectId(eachrt['database_id'])})
+ relation_instance.right_subject = ObjectId(request.POST.get(eachrt["database_id"],""))
+ # relation_instance.save()
+ gr_node = create_grelation(relation_instance.subject, relation_instance.relation_type, relation_instance.right_subject)
+ else :
+ if request.POST.get(eachrt["type_id"],""):
+ relationtype_key = node_collection.find_one({"_id":ObjectId(eachrt["type_id"])})
+ right_subject = node_collection.find_one({"_id":ObjectId(request.POST.get(eachrt["type_id"],""))})
+ gr_node = create_grelation(newgsystem._id, relationtype_key, right_subject._id)
+
+ #this empty list will have the Process objects as its elements
+ processes8=[]
+ n1=len(systemtype_relationtype_set)
+ #divides the list into those many parts
+ n2=n1/x
+ #Process object is created.The list after being partioned is also given as an argument.
+ for i in range(x):
+            processes8.append(mp.Process(target=multi_8,args=(systemtype_relationtype_set[i*n2:(i+1)*n2],)))
+ for i in range(x):
+ processes8[i].start()#each Process started
+ for i in range(x):
+ processes8[i].join() #each Process converges
return HttpResponseRedirect(reverse(app_name.lower()+":"+template_prefix+'_app_detail', kwargs={'group_id': group_id, "app_id":app_id, "app_set_id":app_set_id}))
template = "ndf/"+template_prefix+"_create_edit.html"
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/page.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/page.py
index f88df0d..b4d711a 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/page.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/page.py
@@ -1,6 +1,7 @@
''' -- imports from python libraries -- '''
# import os -- Keep such imports here
import json
+import multiprocessing as mp
from difflib import HtmlDiff
''' -- imports from installed packages -- '''
@@ -82,7 +83,7 @@ def page(request, group_id, app_id=None):
# Code for user shelf
shelves = []
shelf_list = {}
- auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
+ auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
# if auth:
# has_shelf_RT = node_collection.one({'_type': 'RelationType', 'name': u'has_shelf' })
@@ -175,10 +176,11 @@ def page(request, group_id, app_id=None):
if node is None:
node = node_collection.find({'member_of':ObjectId(app_id)})
-
+ #a temp. variable which stores the lookup for append method
+ content_append_temp=content.append
for nodes in node:
node,ver=get_versioned_page(nodes)
- content.append(node)
+ content_append_temp(node)
# rcs content ends here
@@ -293,8 +295,10 @@ def create_edit_page(request, group_id, node_id=None):
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(gst_page._id),'group_set': ObjectId(group_id) })
nodes_list = []
- for each in available_nodes:
- nodes_list.append(str((each.name).strip().lower()))
+ # for each in available_nodes:
+ # nodes_list.append(str((each.name).strip().lower()))
+ #loop replaced by a list comprehension
+    nodes_list=[str((each.name).strip().lower()) for each in available_nodes]
if node_id:
page_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
@@ -415,11 +419,9 @@ def translate_node(request,group_id,node_id=None):
content = data
node_details=[]
for k,v in content.items():
-
- node_name = content['name']
- node_content_org=content['content_org']
- node_tags=content['tags']
-
+ node_name = content['name']
+ node_content_org=content['content_org']
+ node_tags=content['tags']
return render_to_response("ndf/translation_page.html",
{'content': content,
'appId':app._id,
@@ -430,7 +432,7 @@ def translate_node(request,group_id,node_id=None):
},
context_instance = RequestContext(request)
- )
+ )
@get_execution_time
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/person.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/person.py
index 6ebae99..f3e0e3d 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/person.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/person.py
@@ -44,7 +44,7 @@ def person_detail(request, group_id, app_id=None, app_set_id=None, app_set_insta
if group_ins:
group_id = str(group_ins._id)
else :
- auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
+ # auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
@@ -86,8 +86,11 @@ def person_detail(request, group_id, app_id=None, app_set_id=None, app_set_insta
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
- for eachset in agency_type_node.collection_set:
- app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+ # for eachset in agency_type_node.collection_set:
+ # app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
+ #loop replaced by a list comprehension
+ app_collection_set=[node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}) for eachset in agency_type_node.collection_set]
+
if app_set_id:
person_gst = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)})#, {'name': 1, 'type_of': 1})
@@ -115,8 +118,10 @@ def person_detail(request, group_id, app_id=None, app_set_id=None, app_set_insta
]
widget_for = get_widget_built_up_data(widget_for, person_gs)
- for each in univ_cur:
- univ_list.append(each)
+# for each in univ_cur:
+# univ_list.append(each)
+ #loop replaced by a list comprehension
+ univ_list=[each for each in univ_cur]
elif title == "Program Officer" or title == "Voluntary Teacher":
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/search_views.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/search_views.py
index fec66be..7de4169 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/search_views.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/search_views.py
@@ -17,6 +17,7 @@ import string
import datetime
import itertools
import nltk
+import multiprocessing as mp
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.models import *
@@ -257,7 +258,9 @@ def results_search(request, group_id, return_only_dict = None):
For each matching GSystem, see if the GSystem has already been added to the list of ids and add if not added.
result is added only if belongs to the list of public groups
"""
-
+ #temp. variables which stores the lookup for append method
+ all_ids_append_temp=all_ids.append
+ search_results_ex_name_append_temp=search_results_ex['name'].append
for j in exact_match:
j.name=(j.name).replace('"',"'")
if j._id not in all_ids:
@@ -265,8 +268,8 @@ def results_search(request, group_id, return_only_dict = None):
#for gr in public_groups:
# if gr in grps:
j = addType(j)
- search_results_ex['name'].append(j)
- all_ids.append(j['_id'])
+ search_results_ex_name_append_temp(j)
+ all_ids_append_temp(j['_id'])
# SORTS THE SEARCH RESULTS BY SIMILARITY WITH THE SEARCH QUERY
#search_results_ex['name'] = sort_names_by_similarity(search_results_ex['name'], search_str_user)
@@ -274,7 +277,8 @@ def results_search(request, group_id, return_only_dict = None):
split_stem_match = [] # will hold all the split stem match results
len_stemmed = len(search_str_stemmed)
c = 0 # GEN. COUNTER
-
+ #a temp. variable which stores the lookup for append method
+ split_stem_match_append_temp=split_stem_match.append
while c < len_stemmed:
word = search_str_stemmed[c]
temp=""
@@ -293,13 +297,15 @@ def results_search(request, group_id, return_only_dict = None):
{'$or':[{"access_policy":{"$in":Access_policy}},{'created_by':request.user.id}]},
{"name":{"$regex":str(word), "$options":"i"}}] },
{"name":1, "_id":1, "member_of":1, "created_by":1, "last_update":1, "group_set":1, "url":1}).sort('last_update',-1)
- split_stem_match.append(temp)
+ split_stem_match_append_temp(temp)
c += 1
"""
For each matching GSystem, see if the GSystem has already been returned in search results and add if not already added.
Result is added only if belongs to the list of public groups and has public access policy
"""
+ #a temp. variable which stores the lookup for append method
+ search_results_st_name_append=search_results_st['name'].append
for j in split_stem_match:
c = 0
for k in j:
@@ -313,8 +319,8 @@ def results_search(request, group_id, return_only_dict = None):
# check that the GSystem should belong to at least one public group
k = addType(k) # adds the link and datetime to the
- search_results_st['name'].append(k)
- all_ids.append(k['_id'])#append to the list of all ids of GSYstems in the results
+ search_results_st_name_append(k)
+ all_ids_append_temp(k['_id'])#append to the list of all ids of GSYstems in the results
c += 1
# SORTS THE SEARCH RESULTS BY SIMILARITY WITH THE SEARCH QUERY
@@ -340,6 +346,9 @@ def results_search(request, group_id, return_only_dict = None):
{"group_set":ObjectId(group_id)},
{"tags":search_str_user}]},
{"name":1, "_id":1, "member_of":1, "created_by":1, "last_update":1, "group_set":1, "url":1}).sort('last_update',-1)
+ # temp. variables which stores the lookup for append method
+ all_ids_append_temp=all_ids.append
+ search_results_ex_tags_append_temp=search_results_ex['tags'].append
for j in exact_match:
j.name=(j.name).replace('"',"'")
if j._id not in all_ids:
@@ -348,8 +357,8 @@ def results_search(request, group_id, return_only_dict = None):
#for gr in public_groups:
# if gr in grps:
j = addType(j)
- search_results_ex['tags'].append(j)
- all_ids.append(j['_id'])
+ search_results_ex_tags_append_temp(j)
+ all_ids_append_temp(j['_id'])
#search_results_ex['tags'] = sort_names_by_similarity(search_results_ex['tags'], search_str_user)
@@ -358,7 +367,8 @@ def results_search(request, group_id, return_only_dict = None):
split_stem_match = []
c = 0 # GEN. COUNTER
len_stemmed = len(search_str_stemmed)
-
+ #a temp. variable which stores the lookup for append method
+ split_stem_match_append_temp=split_stem_match.append
while c < len_stemmed:
word = search_str_stemmed[c]
if user_reqd != -1:
@@ -375,7 +385,7 @@ def results_search(request, group_id, return_only_dict = None):
{"group_set":ObjectId(group_id)}]},
{"name":1, "_id":1, "member_of":1, "created_by":1, "last_update":1, "group_set":1, "url":1}).sort('last_update',-1)
- split_stem_match.append(temp)
+ split_stem_match_append_temp(temp)
c += 1
#search_results_st['tags'] = sort_names_by_similarity(search_results_st['tags'], search_str_user)
@@ -383,6 +393,8 @@ def results_search(request, group_id, return_only_dict = None):
For each matching GSystem, see if the GSystem has already been returned in search results and add if not already added.
Result is added only if belongs to the list of public groups and has public access policy
"""
+ #a temp. variable which stores the lookup for append method
+ search_results_st_tags_append=search_results_st['tags'].append
for j in split_stem_match:
c = 0
for k in j:
@@ -392,8 +404,8 @@ def results_search(request, group_id, return_only_dict = None):
#for gr in public_groups:
# if gr in grps:
k = addType(k)
- search_results_st['tags'].append(k)
- all_ids.append(k['_id'])
+ search_results_st_tags_append(k)
+ all_ids_append_temp(k['_id'])
c += 1
"""
@@ -405,7 +417,8 @@ def results_search(request, group_id, return_only_dict = None):
content_docs = []
content_match_pairs = [] # STORES A DICTIONARY OF MATCHING DOCUMENTS AND NO_OF_WORDS THAT MATCH SEARCH QUERY
sorted_content_match_pairs = [] # STORES THE ABOVE DICTIONARY IN A SORTED MANNER
-
+ #a temp. variable which stores the lookup for append method
+ content_match_pairs_append_temp=content_match_pairs.append
if (search_by_contents == True):
# FETCH ALL THE GSYSTEMS THAT HAVE BEEN MAP REDUCED.
all_Reduced_documents = node_collection.find({"required_for": reduced_doc_requirement}, {"content": 1, "_id": 0, "orignal_id": 1})
@@ -418,10 +431,9 @@ def results_search(request, group_id, return_only_dict = None):
for word in search_str_stemmed:
if word in content.keys():# IF THE WORD EXISTS IN THE CURRENT DOCUMENT
match_count += content[word] # ADD IT TO THE MATCHES COUNT
-
if match_count > 0:
all_ids.append(singleDoc.orignal_id)
- content_match_pairs.append({'doc_id':singleDoc.orignal_id, 'matches':match_count})
+ content_match_pairs_append_temp({'doc_id':singleDoc.orignal_id, 'matches':match_count})
match_counts = [] # KEEPS A SORTED LIST OF COUNT OF MATCHES IN RESULT DOCUMENTS
for pair in content_match_pairs:
@@ -430,7 +442,8 @@ def results_search(request, group_id, return_only_dict = None):
c += 1
match_counts.insert(c, pair['matches'])
sorted_content_match_pairs.insert(c, pair) # SORTED INSERT (INCREASING ORDER)
-
+ #a temp. variable which stores the lookup for append method
+ search_results_st_content_append_temp=search_results_st['content'].append
for docId in sorted_content_match_pairs:
doc = node_collection.find_one({"_id":docId['doc_id'], "access_policy":Access_policy}, {"name":1, "_id":1, "member_of":1, "created_by":1, "last_update":1, "group_set":1, "url":1})
try:
@@ -447,9 +460,9 @@ def results_search(request, group_id, return_only_dict = None):
if ObjectId(group_id) in grps:
if user_reqd != -1:
if User.objects.get(username=doc['created_by']).pk == user_reqd:
- search_results_st['content'].append(doc)
+ search_results_st_content_append_temp(doc)
else:
- search_results_st['content'].append(doc)
+ search_results_st_content_append_temp(doc)
except:
pass
#search_results = json.dumps(search_results, cls=Encoder)
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/topics.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/topics.py
index 2344f1f..0a1d090 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/topics.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/topics.py
@@ -724,7 +724,8 @@ def topic_detail_view(request, group_id, app_Id=None):
nav_l=request.GET.get('nav_li','')
breadcrumbs_list = []
nav_li = ""
-
+ #a temp. variable which stores the lookup for append method
+ breadcrumbs_list_append_temp=breadcrumbs_list.append
if nav_l:
nav_li = nav_l
nav_l = str(nav_l).split(",")
@@ -739,9 +740,9 @@ def topic_detail_view(request, group_id, app_Id=None):
if each_obj.prior_node:
theme_obj = node_collection.one({'_id': ObjectId(each_obj.prior_node[0] ) })
theme_id = theme_obj._id
- breadcrumbs_list.append( (str(theme_obj._id), theme_obj.name) )
+ breadcrumbs_list_append_temp( (str(theme_obj._id), theme_obj.name) )
- breadcrumbs_list.append( (str(each_obj._id), each_obj.name) )
+ breadcrumbs_list_append_temp( (str(each_obj._id), each_obj.name) )
@@ -752,6 +753,8 @@ def topic_detail_view(request, group_id, app_Id=None):
###shelf###
shelves = []
+ #a temp. variable which stores the lookup for append method
+ shelves_append_temp=shelves.append
shelf_list = {}
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
@@ -763,12 +766,14 @@ def topic_detail_view(request, group_id, app_Id=None):
if shelf:
for each in shelf:
shelf_name = node_collection.one({'_id': ObjectId(each.right_subject)})
- shelves.append(shelf_name)
+ shelves_append_temp(shelf_name)
shelf_list[shelf_name.name] = []
+ #a temp. variable which stores the lookup for append method
+ shelf_list_shlefname_append_temp=shelf_list[shelf_name.name].append
for ID in shelf_name.collection_set:
shelf_item = node_collection.one({'_id': ObjectId(ID) })
- shelf_list[shelf_name.name].append(shelf_item.name)
+ shelf_list_shlefname_append_temp(shelf_item.name)
else:
shelves = []
diff --git a/gnowsys-ndf/gnowsys_ndf/ndf/views/userDashboard.py b/gnowsys-ndf/gnowsys_ndf/ndf/views/userDashboard.py
index c93f572..c64cac9 100644
--- a/gnowsys-ndf/gnowsys_ndf/ndf/views/userDashboard.py
+++ b/gnowsys-ndf/gnowsys_ndf/ndf/views/userDashboard.py
@@ -64,7 +64,7 @@ def userpref(request,group_id):
@login_required
@get_execution_time
def uDashboard(request, group_id):
- usrid = int(group_id)
+ usrid = int(group_id)
auth = node_collection.one({'_type': "Author", 'created_by': usrid})
group_id = auth._id
# Fetching user group of current user & then reassigning group_id with it's corresponding ObjectId value
@@ -150,37 +150,37 @@ def uDashboard(request, group_id):
user_activity = []
page_gst = node_collection.one({'_type': "GSystemType", 'name': 'Page'})
- page_cur = node_collection.find({'member_of': {'$all': [page_gst._id]},
+ page_cur = node_collection.find ({'member_of': {'$all': [page_gst._id]},
'created_by': int(usrid), "status": {"$nin": ["HIDDEN"]}})
- file_cur = node_collection.find({'_type': u"File", 'created_by': int(usrid),
+ file_cur = node_collection.find ({'_type': u"File", 'created_by': int(usrid),
"status": {"$nin": ["HIDDEN"]}})
forum_gst = node_collection.one({"_type": "GSystemType", "name": "Forum"})
- forum_count = node_collection.find({"_type": "GSystem",
+ forum_count = node_collection.find ({"_type": "GSystem",
"member_of": forum_gst._id, 'created_by': int(usrid),
"status": {"$nin": ["HIDDEN"]}})
quiz_gst = node_collection.one({"_type": "GSystemType", "name": "Quiz"})
- quiz_count = node_collection.find({"_type": "GSystem",
+ quiz_count = node_collection.find ({"_type": "GSystem",
"member_of": quiz_gst._id, 'created_by': int(usrid),
"status": {"$nin": ["HIDDEN"]}})
thread_gst = node_collection.one({"_type": "GSystemType", "name": "Twist"})
- thread_count = node_collection.find({"_type": "GSystem",
+ thread_count =node_collection.find ({"_type": "GSystem",
"member_of": thread_gst._id, 'created_by': int(usrid),
"status": {"$nin": ["HIDDEN"]}})
reply_gst = node_collection.one({"_type": "GSystemType", "name": "Reply"})
- reply_count = node_collection.find({"_type": "GSystem",
+ reply_count = node_collection.find ({"_type": "GSystem",
"member_of": reply_gst._id, 'created_by': int(usrid),
"status": {"$nin": ["HIDDEN"]}})
task_cur = ""
if current_user:
if int(current_user) == int(usrid):
- task_cur = node_collection.find(
+ task_cur = node_collection.find (
{'member_of': task_gst._id, 'attribute_set.Status': {'$in': ["New", "In Progress"]}, 'attribute_set.Assignee':usrid}
).sort('last_update', -1).limit(10)
dashboard_count.update({'Task': task_cur.count()})
- group_cur = node_collection.find(
+ group_cur = node_collection.find (
{'_type': "Group", 'name': {'$nin': ["home", auth.name]},"access_policy":{"$in":Access_policy},
'$or': [{'group_admin': int(usrid)}, {'author_set': int(usrid)}]}).sort('last_update', -1).limit(10)
@@ -189,7 +189,7 @@ def uDashboard(request, group_id):
# user activity gives all the activities of the users
activity = ""
- activity_user = node_collection.find(
+ activity_user = node_collection.find (
{'$and': [{'$or': [{'_type': 'GSystem'}, {'_type': 'group'},
{'_type': 'File'}]}, {"access_policy": {"$in": Access_policy}},{'status':{'$in':[u"DRAFT",u"PUBLISHED"]}},
{'member_of': {'$nin': [exclued_from_public]}},
@@ -199,10 +199,13 @@ def uDashboard(request, group_id):
a_user = []
dashboard_count.update({'activity': activity_user.count()})
- for i in activity_user:
- if i._type != 'Batch' or i._type != 'Course' or i._type != 'Module':
- a_user.append(i)
-
+ #for i in activity_user:
+ # if i._type != 'Batch' or i._type != 'Course' or i._type != 'Module':
+ # a_user.append(i)
+ # loop replaced by a list comprehension
+ a_user=[i for i in activity_user if (i._type != 'Batch' or i._type != 'Course' or i._type != 'Module')]
+ # cache the bound append method of user_activity in a local variable
+ user_activity_append_temp=user_activity.append
for each in a_user:
if each.created_by == each.modified_by:
if each.last_update == each.created_at:
@@ -213,10 +216,10 @@ def uDashboard(request, group_id):
activity = 'created'
if each._type == 'Group':
- user_activity.append(each)
+ user_activity_append_temp(each)
else:
member_of = node_collection.find_one({"_id": each.member_of[0]})
- user_activity.append(each)
+ user_activity_append_temp(each)
'''
notification_list=[]
@@ -240,11 +243,13 @@ def uDashboard(request, group_id):
task_cur gives the task asigned to users
'''
- obj = node_collection.find(
+ obj = node_collection.find (
{'_type': {'$in': [u"GSystem", u"File"]}, 'contributors': int(usrid),
'group_set': {'$all': [ObjectId(group_id)]}}
)
collab_drawer = []
+ # cache the bound append method of collab_drawer in a local variable
+ collab_drawer_append_temp=collab_drawer.append
"""
To populate collaborators according
to their latest modification of particular resource:
@@ -252,7 +257,7 @@ def uDashboard(request, group_id):
for each in obj.sort('last_update', -1):
for val in each.contributors:
name = User.objects.get(pk=val).username
- collab_drawer.append({'usrname': name, 'Id': val,
+ collab_drawer_append_temp({'usrname': name, 'Id': val,
'resource': each.name})
shelves = []
@@ -305,7 +310,6 @@ def uDashboard(request, group_id):
context_instance=RequestContext(request)
)
-
@get_execution_time
def user_preferences(request,group_id,auth_id):
try:
@@ -350,6 +354,7 @@ def user_preferences(request,group_id,auth_id):
except Exception as e:
print "Exception in userpreference view "+str(e)
return HttpResponse("Failure")
+
@get_execution_time
def user_template_view(request,group_id):
auth_group = None