由买买提看人间百态

boards

本页内容为未名空间相应帖子的节选和存档,一周内的帖子最多显示50字,超过一周显示500字 访问原帖
CS版 - 如何用python web.py web service 做 multiple parameters 的 c (转载)
相关主题
请问一个在mac上面openmpi 安装的问题google没法用,大虾谁帮我看一下? (转载)
[合集] destro爱CS - episode 1 (EE chanllenge CS)请教:如何用Java get URL content是.swe并且是utf-16 encoding (转载)
[合集] REVISION Re: destro爱CS - episode 1再弱问java:这是java version不同造成的吗? (转载)
Latex problem问个关于socket问题
memorial page for Prof. Hongjun Lu用libxml2 API open xml file的问题
Re: 关于网叶连接的路径问题,急救!!问一个bloom filter 和 bitmap的使用区别
写paper引用wiki的东西可以吗?问个HTML DOM JavaScript相关的问题 (转载)
help with Programming!!!google site mapping 怎么弄?谢谢! (转载)
相关话题的讨论汇总
话题: s2话题: title1话题: str话题: title话题: __
进入CS版参与讨论
1 (共1页)
t***q
发帖数: 418
1
【 以下文字转载自 Programming 讨论区 】
发信人: treeq (treeq), 信区: Programming
标 题: 如何用python web.py web service 做 multiple parameters 的 call?
发信站: BBS 未名空间站 (Sun Mar 22 23:14:33 2015, 美东)
大家好。我的那个web service 做成了。用了python 的 web.py.
install web.py
cd webpy
编辑python web service.
#!/usr/bin/env python
# Python 2 / web.py REST service.
# Fuzzy-matches a title query against tab-delimited catalog files
# (Book1.txt, series_name.txt, season_name.txt) using Levenshtein ratio
# and returns the best match as a dict (web.py renders it on GET).
#
# Fixes vs. the pasted original:
#   * restored the indentation the forum archive stripped
#   * row.split("t") and the "t" joins split/join on the LETTER t; the
#     files are tab-delimited, so "\t" is used throughout
#   * removed a stray "return gg / if __name__ ..." fragment that the
#     paste duplicated into the middle of GET(), which made the rest of
#     the method unreachable
#   * removed redundant fh.close() calls inside "with" blocks and
#     commented-out dead code
import web
import csv
import difflib
import re
import operator
import Levenshtein

urls = ('/title_matching2', 'title_matching2')
app = web.application(urls, globals())

# Noise tokens/punctuation stripped from titles before comparison.
_NOISE = (r'series', r'episode', r'season', r'"', r'-', r':')


def _normalize(text):
    """Lower-case *text*, drop noise words/punctuation, remove spaces."""
    out = re.sub(r',', ' ', text).lower()
    for pat in _NOISE:
        out = re.sub(pat, ' ', out)
    return re.sub(r' ', '', out)


def _load_rows(path):
    """Read *path* as a list of tab-split rows."""
    with open(path, "rb") as fh:
        return [row.split("\t") for row in fh]


class title_matching2:
    def __init__(self):
        self.hello = "hello world"

    def GET(self):
        # NOTE(review): the sample call in the post uses ?title=..., but
        # web.input here is keyed on "name" -- confirm which query
        # parameter is actually intended.
        getInput = web.input(name="World")

        b = _load_rows("Book1.txt")          # catalog: column 3 is the title
        c = _load_rows("series_name.txt")    # column 0 is the series name
        d = _load_rows("season_name.txt")    # column 0 is the season name

        title1 = _normalize(str(getInput.name))

        # Append comparison keys to each row:
        # b rows get [-2] = fully normalized title, [-1] = lowercased title.
        for row in b:
            row.append(_normalize(row[3]))
            row.append(re.sub(r',', ' ', row[3]).lower())
        for row in c:
            row.append(_normalize(row[0]))
        for row in d:
            row.append(_normalize(row[0]))

        # Score every catalog row against the query.
        ff = {}
        for row in b:
            score = float(Levenshtein.ratio(title1, row[-2]))
            key = str(row[2]) + "\t" + str(row[3]).strip() + "\t" + row[-1].strip()
            ff[key] = score

        best_key, best_score = max(ff.iteritems(), key=operator.itemgetter(1))

        # If the matched title looks like "series-season-episode", also
        # score it against the series/season catalogs.  (The original code
        # computed these ee/cc scores but never returned them; preserved
        # as-is -- TODO confirm they were meant to be part of the result.)
        kk = (title1 + "\t" + best_key).split("\t")
        matchObj = re.match(r'(.+)-(.+)-(.+)', kk[3].strip())
        if matchObj:
            ee = {}
            cc = {}
            for row in c:
                ee[str(row[0] + "\t" + row[1].strip())] = float(
                    Levenshtein.ratio(str(matchObj.group(1)), row[-1]))
            for row in d:
                cc[str(row[0] + "\t" + row[1].strip())] = float(
                    Levenshtein.ratio(str(matchObj.group(1) + matchObj.group(2)),
                                      row[-1]))

        uu = str(best_key).split("\t")
        gg = {}
        gg['matched title'] = uu[1]
        gg['matched title WPR_ID'] = uu[0]
        gg['matched title confidence level'] = best_score
        return gg


if __name__ == "__main__":
    app.run()
然后 run 这个 web service , ./some.py, 再call:
links http://localhost:8080/title_matching2?title=diehard
会返回一个hash table. 正是我想要的。
但是如果run multiple parameters 的话:
代码如下:
#!/usr/bin/env python
# Python 2 / web.py REST service, two-parameter variant: matches on
# title AND product type against the tab-delimited Book3.txt catalog.
#
# Fixes vs. the pasted original (same paste damage as the one-parameter
# version): restored indentation, "\t" instead of the letter "t",
# redundant close() inside "with" and commented-out dead code removed.
#
# NOTE(review): the "no hash table returned" symptom described in the
# post is a shell issue, not a Python one -- in
#     links http://host/title_matching4?title=diehard&prod=feature
# the unquoted "&" backgrounds the command (hence the "[1] 1190" job
# message); the URL must be quoted when called from a shell.
import web
import csv
import difflib
import re
import operator
import Levenshtein

urls = ('/title_matching4', 'title_matching4')
app = web.application(urls, globals())

# Noise tokens/punctuation stripped from titles before comparison.
_NOISE = (r'series', r'episode', r'season', r'"', r'-', r':')


def _normalize(text):
    """Lower-case *text*, drop noise words/punctuation, remove spaces."""
    out = re.sub(r',', ' ', text).lower()
    for pat in _NOISE:
        out = re.sub(pat, ' ', out)
    return re.sub(r' ', '', out)


class title_matching4:
    def __init__(self):
        self.hello = "hello world"

    def GET(self):
        getInput = web.input(title="World", prod="type")

        with open("Book3.txt", "rb") as fh:   # tab-delimited catalog
            b = [row.split("\t") for row in fh]

        title1 = _normalize(str(getInput.title))
        prod1 = str(getInput.prod).lower()
        s4 = title1 + prod1                   # combined comparison key

        # Each row gets a trailing normalized "title+prod" key in row[-1].
        for row in b:
            row.append(_normalize(str(row[1])) + str(row[3].strip()).lower())

        # Score every catalog row against the combined query key.
        ff = {}
        for row in b:
            key = (str(row[1]).strip() + "\t" + str(row[2]).strip()
                   + "\t" + str(row[3]).strip())
            ff[key] = float(Levenshtein.ratio(s4, row[-1]))

        best_key, best_score = max(ff.iteritems(), key=operator.itemgetter(1))

        # Fields: [title, WPR id, prod, score, query] -- index 3 is the
        # score, which (as in the original) is returned as a string.
        fields = (str(best_key) + "\t" + str(best_score)
                  + "\t" + title1).split("\t")
        qq = {}
        qq['matched title'] = fields[0]
        qq['matched WPR_id'] = fields[1]
        qq['matched title confidence level'] = fields[3]
        return qq


if __name__ == "__main__":
    app.run()
然后再run ./rest9.py
再打开一个link:
links http://localhost:8080/title_matching4?title=diehard&prod=feature
就没有hash table 返回。虽然我想返回hash table.
只是出现如下东西在screen 上:
ubuntu@ip-10-0-0-126:~$ links http://localhost:8080/title_matching4?title=diehard&prod=feature
[1] 1190
请问这是为什么?
为什么不能打开一个连接,并有hash table 返回?
多谢!
1 (共1页)
进入CS版参与讨论
相关主题
google site mapping 怎么弄?谢谢! (转载)memorial page for Prof. Hongjun Lu
请问怎么去除 IE/Google chrome的redirect 病毒?Re: 关于网叶连接的路径问题,急救!!
technical question: click fraud detection? (转载)写paper引用wiki的东西可以吗?
Re: networking is first - faculty posit.help with Programming!!!
请问一个在mac上面openmpi 安装的问题google没法用,大虾谁帮我看一下? (转载)
[合集] destro爱CS - episode 1 (EE chanllenge CS)请教:如何用Java get URL content是.swe并且是utf-16 encoding (转载)
[合集] REVISION Re: destro爱CS - episode 1再弱问java:这是java version不同造成的吗? (转载)
Latex problem问个关于socket问题
相关话题的讨论汇总
话题: s2话题: title1话题: str话题: title话题: __