I'm trying to rewrite some CSV-reading code so that it can run on multiple cores in Python 3.2.2. I tried to use the Pool object from multiprocessing, adapted from a working example (which already works for another part of my project). I ran into an error message that I'm finding hard to decipher and troubleshoot.
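For context, the Pool.map pattern being adapted here looks roughly like this (a minimal sketch with a placeholder worker and placeholder data, not the actual working example):

from multiprocessing import Pool

def work(chunk):
    # placeholder worker: process one chunk and return a result
    return len(chunk)

if __name__ == '__main__':
    data = [[1, 2], [3], [4, 5, 6]]
    p = Pool()                   # one worker process per core by default
    results = p.map(work, data)  # [2, 1, 3]
    p.close()
    p.join()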
The error:
Traceback (most recent call last):
  File "parser5_nodots_parallel.py", line 256, in <module>
    MG,ppl = csv2graph(r)
  File "parser5_nodots_parallel.py", line 245, in csv2graph
    node_chunks)
  File "/Library/Frameworks/Python.framework/Versions/3.2/lib/python3.2/multiprocessing/pool.py", line 251, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/Library/Frameworks/Python.framework/Versions/3.2/lib/python3.2/multiprocessing/pool.py", line 552, in get
    raise self._value
AttributeError: __exit__
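As far as I can tell, the raise self._value line means Pool.map is re-raising an exception that was originally raised inside a worker process, so the worker's own traceback is lost. On Python 3.2, AttributeError: __exit__ is what a with statement raises when its expression is not a context manager; a minimal example that produces the same error (unrelated to my code, just to show the shape of it):

# On Python 3.2, `with` on an object that has no __exit__ (here a plain list)
# fails with: AttributeError: __exit__
with [1, 2, 3] as f:
    print(f)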
The relevant code:
import csv
import time
import datetime
import re
from operator import itemgetter
from multiprocessing import Pool
import itertools
import networkx as nx

def chunks(l,n):
    """Divide a list of nodes `l` in `n` chunks"""
    l_c = iter(l)
    while 1:
        x = tuple(itertools.islice(l_c,n))
        if not x:
            return
        yield x

def csv2nodes(r):
    strptime = time.strptime
    mktime = time.mktime
    l = []
    ppl = set()
    pattern = re.compile(r"""[A-Za-z0-9"/]+?(?=[,\n])""")
    for row in r:
        with pattern.findall(row) as f:
            cell = int(f[3])
            id = int(f[2])
            st = mktime(strptime(f[0],'%d/%m/%Y'))
            ed = mktime(strptime(f[1],'%d/%m/%Y'))
            # collect list
            l.append([(id,cell,{1:st,2: ed})])
            # collect separate sets
            ppl.add(id)
    return (l,ppl)

def csv2graph(source):
    MG=nx.MultiGraph()
    # Remember that I use integers for edge attributes, to save space! Dic above.
    # start: 1
    # end: 2
    p = Pool()
    node_divisor = len(p._pool)
    node_chunks = list(chunks(source,int(len(source)/int(node_divisor))))
    num_chunks = len(node_chunks)
    pedgelists = p.map(csv2nodes,
                       node_chunks)
    ll = []
    ppl = set()
    for l in pedgelists:
        ll.append(l[0])
        ppl.update(l[1])
    MG.add_edges_from(ll)
    return (MG,ppl)

with open('/Users/laszlosandor/Dropbox/peers_prisons/python/codetenus_test.txt','r') as source:
    r = source.readlines()
    MG,ppl = csv2graph(r)
What's a good way to solve this?
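One troubleshooting approach I'm considering (a sketch only; the name csv2nodes_safe is made up): since Pool.map hands back only the bare exception, I could either call csv2nodes(node_chunks[0]) directly, without the Pool, to get a normal traceback, or wrap the worker so the full traceback text travels back to the parent process:

import traceback

def csv2nodes_safe(chunk):
    # Wrap the real worker so any exception reaches the parent with the
    # worker's full traceback text, instead of just the bare exception.
    try:
        return csv2nodes(chunk)
    except Exception:
        raise RuntimeError(traceback.format_exc())

# in csv2graph, map the wrapper instead:
# pedgelists = p.map(csv2nodes_safe, node_chunks)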
Because of a scoping issue, I accidentally passed a .