按钮命令 Tkinter Python 的问题

发布于 2024-11-26 21:46:23 字数 7535 浏览 0 评论 0原文

因此,我有一个程序,可以在 SEC Edgar 数据库中搜索年度报告 (10-K),并在列表框中返回包含 40 个不同项目的列表。好吧,我想创建一个“下一个 40”按钮,该按钮显示列表框中的下一个 40 个项目,以下代码完成此操作:

def Next():
    """Fetch the 'Next 40' EDGAR results page and show its filings in the listbox.

    NOTE(review): this always re-downloads the *first* results page and follows
    its single 'Next 40' link, so repeated presses cannot advance past page
    two — cache the current page (or a Start counter) to go further.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Slice the relative href out of the "Next 40" button's tag text,
    # from '/cgi' up to the end of 'count=40'.
    npar = str(soup.find(value="Next 40"))
    index = npar.find('/cgi')
    index2 = npar.find('count=40') + len('count=40')
    nextpage = 'http://www.sec.gov' + npar[index:index2]

    sock2 = urllib.urlopen(nextpage)
    raw2 = sock2.read()
    soup2 = BeautifulSoup(raw2)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup2.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the new page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

正如您所见,按下按钮时,它会读取原始链接 (page),然后在该 html 网站(页面)上查找“下一个 40”超链接。接着它解析新的 html 文档 (nextpage),并获取项目名称和关联的链接。现在这段代码能成功地从原来的页面转到下一页,但只能显示这一个下一页。

那么,我如何才能将 (nextpage) 放入原始(页面)中,然后每次按下“下一步”按钮时都能列出 (nextnextpage) html 文档中的项目?抱歉,如果这令人困惑,我真的不知道有任何其他方法可以解释它。

如需更多说明,这里是我要解析的实际站点链接:http://www.sec.gov/cgi-bin/browse-edgar ... getcompany 我希望“下一步”按钮继续从该网站的“下一个 40”按钮检索 html 超链接。

这是我的整个程序代码,以防您需要:

import BeautifulSoup
from BeautifulSoup import BeautifulSoup
import urllib
import sgmllib
from Tkinter import *
import tkMessageBox
import re

class MyParser(sgmllib.SGMLParser):
    """SGML parser that collects the text of <td nowrap> cells (filing
    descriptions) and every <a href> value (hyperlinks)."""

    def __init__(self, verbose=0):
        sgmllib.SGMLParser.__init__(self, verbose)
        self.descriptions = []          # text gathered from nowrap <td> cells
        self.hyperlinks = []            # every href attribute seen
        self.inside_td_element = 0      # 1 while inside a <td nowrap>
        self.starting_description = 0   # 1 until the cell's first data chunk

    def parse(self, psoup):
        """Feed the HTML string *psoup* through the parser and finalize."""
        self.feed(psoup)
        self.close()

    def start_td(self, attributes):
        # Only <td> tags carrying a nowrap attribute hold filing rows.
        for name, value in attributes:
            if name == "nowrap":
                self.inside_td_element = 1
                self.starting_description = 1

    def end_td(self):
        self.inside_td_element = 0

    def start_a(self, attributes):
        # Record every hyperlink target, regardless of nesting.
        for name, value in attributes:
            if name == "href":
                self.hyperlinks.append(value)

    def handle_data(self, data):
        # Start a new description on a cell's first chunk, then append
        # continuation chunks to the current one.
        if self.inside_td_element:
            if self.starting_description:
                self.descriptions.append(data)
                self.starting_description = 0
            else:
                self.descriptions[-1] += data

    def get_descriptions(self):
        """Return the accumulated filing descriptions."""
        return self.descriptions

    def get_hyperlinks(self):
        """Return the accumulated href values."""
        return self.hyperlinks

def Submit():
    """Search EDGAR for the entered ticker/CIK and fill the listbox.

    Shows an error dialog when the entry is empty; on success enables the
    Download and Next buttons.
    """
    global entryWidget

    if entryWidget.get().strip() == "":
        tkMessageBox.showerror("Tkinter Entry Widget", "Enter a text value")
    else:
        # First results page for the entered ticker/CIK.
        page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
        sock = urllib.urlopen(page)
        raw = sock.read()
        soup = BeautifulSoup(raw)
        # Only the nowrap cells hold the filing rows we care about.
        psoup = str(soup.findAll(nowrap=True))
        myparser = MyParser()
        myparser.parse(psoup)

        filinglist = myparser.get_descriptions()
        linklist = myparser.get_hyperlinks()

        # Drop table-header noise and file-number entries like '001-…'.
        filinglist = [s for s in filinglist if s != 'Documents']
        filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
        filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

        linklist = [s for s in linklist if not s.startswith('/cgi-')]

        for pos, filing in enumerate(filinglist):
            Lb1.insert(pos, filing)

        downloadbutton.configure(state=NORMAL)
        nextbutton.configure(state=NORMAL)

def Next():
    """Fetch the 'Next 40' EDGAR results page and show its filings.

    Enables Previous and disables Next afterwards.

    NOTE(review): this always re-downloads the *first* results page and
    follows its single 'Next 40' link, so it can only ever reach page two —
    cache the current page's soup (or a Start counter) to go further.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Slice the relative href out of the "Next 40" button's tag text,
    # from '/cgi' up to the end of 'count=40'.
    npar = str(soup.find(value="Next 40"))
    index = npar.find('/cgi')
    index2 = npar.find('count=40') + len('count=40')
    nextpage = 'http://www.sec.gov' + npar[index:index2]

    sock2 = urllib.urlopen(nextpage)
    raw2 = sock2.read()
    soup2 = BeautifulSoup(raw2)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup2.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the new page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

    previousbutton.configure(state=NORMAL)
    nextbutton.configure(state=DISABLED)

def Previous():
    """Re-download the first EDGAR results page and show its filings.

    Acts as the inverse of Next(): re-enables Next and disables Previous.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the first page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

    nextbutton.configure(state=NORMAL)
    previousbutton.configure(state=DISABLED)

if __name__ == "__main__":
    # Build the Tk window: entry at the top, listbox in the middle,
    # Submit/Download and Previous/Next button rows at the bottom.
    root = Tk()
    root.title("SEC Edgar Search")
    root["padx"] = 10
    root["pady"] = 25

    top = Frame(root)
    bottom = Frame(root)
    bottom2 = Frame(root)
    top.pack(side=TOP)
    bottom.pack(side=BOTTOM, fill=BOTH, expand=True)
    bottom2.pack(side=BOTTOM, fill=BOTH, expand=True)

    textFrame = Frame(root)

    entryLabel = Label(textFrame)
    entryLabel["text"] = "Ticker symbol:"
    entryLabel.pack(side=TOP)

    # Global entry widget read by Submit/Next/Previous.
    entryWidget = Entry(textFrame)
    entryWidget["width"] = 15
    entryWidget.pack(side=LEFT)

    textFrame.pack()

    scrollbar = Scrollbar(root)
    scrollbar.pack(side=RIGHT, fill=Y)

    # Global listbox holding the filing descriptions.
    Lb1 = Listbox(root, width=20, height=15, yscrollcommand=scrollbar.set, selectmode=EXTENDED)
    Lb1.pack()

    scrollbar.config(command=Lb1.yview)

    submitbutton = Button(root, text="Submit", command=Submit)
    submitbutton.pack(in_=bottom2, side=TOP)

    # Download/Previous/Next start disabled until a search succeeds.
    downloadbutton = Button(root, text="Download")
    downloadbutton.pack(in_=bottom2, side=TOP)
    downloadbutton.configure(state=DISABLED)

    previousbutton = Button(root, text="Previous 40", command=Previous)
    previousbutton.pack(in_=bottom, side=LEFT)
    previousbutton.configure(state=DISABLED)

    nextbutton = Button(root, text="Next 40", command=Next)
    nextbutton.pack(in_=bottom, side=LEFT)
    nextbutton.configure(state=DISABLED)

    root.mainloop()

So I have this program that searches the SEC Edgar database for annual reports (10-K's), and returns a list of 40 different items in a listbox. Well I want to create a 'Next 40' button which displays the next 40 items in the listbox, which the following code accomplishes:

def Next():
    """Fetch the 'Next 40' EDGAR results page and show its filings in the listbox.

    NOTE(review): this always re-downloads the *first* results page and follows
    its single 'Next 40' link, so repeated presses cannot advance past page
    two — cache the current page (or a Start counter) to go further.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Slice the relative href out of the "Next 40" button's tag text,
    # from '/cgi' up to the end of 'count=40'.
    npar = str(soup.find(value="Next 40"))
    index = npar.find('/cgi')
    index2 = npar.find('count=40') + len('count=40')
    nextpage = 'http://www.sec.gov' + npar[index:index2]

    sock2 = urllib.urlopen(nextpage)
    raw2 = sock2.read()
    soup2 = BeautifulSoup(raw2)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup2.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the new page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

As you can see when the button is pressed, it reads the original link (page) than looks for the "Next 40" hyperlink on the html website (page). It then parses the new html document (nextpage), then subsequently gets the item names and associated links. Now this code successfully goes to the next page from the original page, but it can only display the one next page.

So how would I be able to make (nextpage) into the original (page) and then be able to list the items from the (nextnextpage) html document everytime I press the 'Next' button? Sorry if that was confusing, I don't really know any other way to explain it.

For more clarification here is the actual site link I want to parse: http://www.sec.gov/cgi-bin/browse-edgar ... getcompany
I want the 'Next' button to keep retrieving the html hyperlink from that sites 'Next 40' button.

Here is my entire program code in case you need it:

import BeautifulSoup
from BeautifulSoup import BeautifulSoup
import urllib
import sgmllib
from Tkinter import *
import tkMessageBox
import re

class MyParser(sgmllib.SGMLParser):
    """SGML parser that collects the text of <td nowrap> cells (filing
    descriptions) and every <a href> value (hyperlinks)."""

    def __init__(self, verbose=0):
        sgmllib.SGMLParser.__init__(self, verbose)
        self.descriptions = []          # text gathered from nowrap <td> cells
        self.hyperlinks = []            # every href attribute seen
        self.inside_td_element = 0      # 1 while inside a <td nowrap>
        self.starting_description = 0   # 1 until the cell's first data chunk

    def parse(self, psoup):
        """Feed the HTML string *psoup* through the parser and finalize."""
        self.feed(psoup)
        self.close()

    def start_td(self, attributes):
        # Only <td> tags carrying a nowrap attribute hold filing rows.
        for name, value in attributes:
            if name == "nowrap":
                self.inside_td_element = 1
                self.starting_description = 1

    def end_td(self):
        self.inside_td_element = 0

    def start_a(self, attributes):
        # Record every hyperlink target, regardless of nesting.
        for name, value in attributes:
            if name == "href":
                self.hyperlinks.append(value)

    def handle_data(self, data):
        # Start a new description on a cell's first chunk, then append
        # continuation chunks to the current one.
        if self.inside_td_element:
            if self.starting_description:
                self.descriptions.append(data)
                self.starting_description = 0
            else:
                self.descriptions[-1] += data

    def get_descriptions(self):
        """Return the accumulated filing descriptions."""
        return self.descriptions

    def get_hyperlinks(self):
        """Return the accumulated href values."""
        return self.hyperlinks

def Submit():
    """Search EDGAR for the entered ticker/CIK and fill the listbox.

    Shows an error dialog when the entry is empty; on success enables the
    Download and Next buttons.
    """
    global entryWidget

    if entryWidget.get().strip() == "":
        tkMessageBox.showerror("Tkinter Entry Widget", "Enter a text value")
    else:
        # First results page for the entered ticker/CIK.
        page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
        sock = urllib.urlopen(page)
        raw = sock.read()
        soup = BeautifulSoup(raw)
        # Only the nowrap cells hold the filing rows we care about.
        psoup = str(soup.findAll(nowrap=True))
        myparser = MyParser()
        myparser.parse(psoup)

        filinglist = myparser.get_descriptions()
        linklist = myparser.get_hyperlinks()

        # Drop table-header noise and file-number entries like '001-…'.
        filinglist = [s for s in filinglist if s != 'Documents']
        filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
        filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

        linklist = [s for s in linklist if not s.startswith('/cgi-')]

        for pos, filing in enumerate(filinglist):
            Lb1.insert(pos, filing)

        downloadbutton.configure(state=NORMAL)
        nextbutton.configure(state=NORMAL)

def Next():
    """Fetch the 'Next 40' EDGAR results page and show its filings.

    Enables Previous and disables Next afterwards.

    NOTE(review): this always re-downloads the *first* results page and
    follows its single 'Next 40' link, so it can only ever reach page two —
    cache the current page's soup (or a Start counter) to go further.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Slice the relative href out of the "Next 40" button's tag text,
    # from '/cgi' up to the end of 'count=40'.
    npar = str(soup.find(value="Next 40"))
    index = npar.find('/cgi')
    index2 = npar.find('count=40') + len('count=40')
    nextpage = 'http://www.sec.gov' + npar[index:index2]

    sock2 = urllib.urlopen(nextpage)
    raw2 = sock2.read()
    soup2 = BeautifulSoup(raw2)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup2.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the new page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

    previousbutton.configure(state=NORMAL)
    nextbutton.configure(state=DISABLED)

def Previous():
    """Re-download the first EDGAR results page and show its filings.

    Acts as the inverse of Next(): re-enables Next and disables Previous.
    """
    global entryWidget

    # First results page for the ticker/CIK typed into the entry widget.
    page = 'http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + entryWidget.get().strip() + '&filenum=&State=&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany'
    sock = urllib.urlopen(page)
    raw = sock.read()
    soup = BeautifulSoup(raw)

    # Only the nowrap cells hold the filing rows we care about.
    psoup = str(soup.findAll(nowrap=True))

    myparser = MyParser()
    myparser.parse(psoup)

    filinglist = myparser.get_descriptions()
    linklist = myparser.get_hyperlinks()

    # Drop table-header noise and file-number entries like '001-…'.
    filinglist = [s for s in filinglist if s != 'Documents']
    filinglist = [s for s in filinglist if s != 'Documents Interactive Data']
    filinglist = [s for s in filinglist if not re.match(r'\d{3}-', s)]

    linklist = [s for s in linklist if not s.startswith('/cgi-')]

    # Replace the listbox contents with the first page's filings.
    Lb1.delete(0, END)
    for pos, filing in enumerate(filinglist):
        Lb1.insert(pos, filing)

    nextbutton.configure(state=NORMAL)
    previousbutton.configure(state=DISABLED)

if __name__ == "__main__":
    # Build the Tk window: entry at the top, listbox in the middle,
    # Submit/Download and Previous/Next button rows at the bottom.
    root = Tk()
    root.title("SEC Edgar Search")
    root["padx"] = 10
    root["pady"] = 25

    top = Frame(root)
    bottom = Frame(root)
    bottom2 = Frame(root)
    top.pack(side=TOP)
    bottom.pack(side=BOTTOM, fill=BOTH, expand=True)
    bottom2.pack(side=BOTTOM, fill=BOTH, expand=True)

    textFrame = Frame(root)

    entryLabel = Label(textFrame)
    entryLabel["text"] = "Ticker symbol:"
    entryLabel.pack(side=TOP)

    # Global entry widget read by Submit/Next/Previous.
    entryWidget = Entry(textFrame)
    entryWidget["width"] = 15
    entryWidget.pack(side=LEFT)

    textFrame.pack()

    scrollbar = Scrollbar(root)
    scrollbar.pack(side=RIGHT, fill=Y)

    # Global listbox holding the filing descriptions.
    Lb1 = Listbox(root, width=20, height=15, yscrollcommand=scrollbar.set, selectmode=EXTENDED)
    Lb1.pack()

    scrollbar.config(command=Lb1.yview)

    submitbutton = Button(root, text="Submit", command=Submit)
    submitbutton.pack(in_=bottom2, side=TOP)

    # Download/Previous/Next start disabled until a search succeeds.
    downloadbutton = Button(root, text="Download")
    downloadbutton.pack(in_=bottom2, side=TOP)
    downloadbutton.configure(state=DISABLED)

    previousbutton = Button(root, text="Previous 40", command=Previous)
    previousbutton.pack(in_=bottom, side=LEFT)
    previousbutton.configure(state=DISABLED)

    nextbutton = Button(root, text="Next 40", command=Next)
    nextbutton.pack(in_=bottom, side=LEFT)
    nextbutton.configure(state=DISABLED)

    root.mainloop()

如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

扫码二维码加入Web技术交流群

发布评论

需要 登录 才能够评论, 你可以免费 注册 一个本站的账号。

评论(1)

口干舌燥 2024-12-03 21:46:23

使用应用程序类而不是全局变量。目前您总是下载第一页。但是您的应用程序类应该缓存当前页面的“soup”,next 使用它从“Next 40”表单按钮获取 onClick 值:

class Application(Frame):
    """Tk application sketch that caches the current result page's soup so
    'Next 40' can be followed repeatedly ('...' marks elided code)."""

    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack()

        self.top = Frame(self)
        self.bottom = Frame(self)
        self.bottom2 = Frame(self)
        self.top.pack(side=TOP)
        self.bottom.pack(side=BOTTOM, fill=BOTH, expand=True)
        self.bottom2.pack(side=BOTTOM, fill=BOTH, expand=True)
        #... 
        self.submitbutton = Button(self, text="Submit", command=self.submit)
        self.submitbutton.pack(in_=self.bottom2, side=TOP)
        #...

    #...

    def submit(self):
        # Download the first results page and cache its soup on self.
        page = ('http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + 
                 self.entryWidget.get().strip() + 
                '&filenum=&State=&Country=&SIC=&owner=exclude' 
                '&Find=Find+Companies&action=getcompany')
        #...
        self.soup = ...

    def next(self):
        # Follow the 'Next 40' link found in the *cached* soup, then
        # replace the cache with the newly downloaded page's soup so the
        # next press advances another page.
        #...
        #there must be a better way than this to extract the onclick value
        #but I don't use/know BeautifulSoup to help with this part

        npar = str(self.soup.find(value="Next 40"))
        index1 = npar.find('/cgi')
        index2 = npar.find('count=40') + len('count=40')  
        page = 'http://www.sec.gov' + npar[index1:index2]

        sock = urllib.urlopen(page)
        raw = sock.read()
        self.soup = BeautifulSoup(raw)

        #...

if __name__ == '__main__':
    root = Tk()
    root.title("SEC Edgar Search")
    root["padx"] = 10
    root["pady"] = 25

    app = Application(root)

    app.mainloop()
    # Tear down the Tk root once the main loop exits.
    root.destroy()

对于每个新页面,onClick 链接都会更新 &amp;Start 参数。因此,您也可以在类中为它递增一个计数器,而不必费心解析当前的 soup 来获取该值。

Use an Application class instead of globals. Currently you're always downloading the first page. But your application class should cache the 'soup' of the current page, which next uses to get the onClick value from the "Next 40" form button:

class Application(Frame):
    """Tk application sketch that caches the current result page's soup so
    'Next 40' can be followed repeatedly ('...' marks elided code)."""

    def __init__(self, parent=None):
        Frame.__init__(self, parent)
        self.pack()

        self.top = Frame(self)
        self.bottom = Frame(self)
        self.bottom2 = Frame(self)
        self.top.pack(side=TOP)
        self.bottom.pack(side=BOTTOM, fill=BOTH, expand=True)
        self.bottom2.pack(side=BOTTOM, fill=BOTH, expand=True)
        #... 
        self.submitbutton = Button(self, text="Submit", command=self.submit)
        self.submitbutton.pack(in_=self.bottom2, side=TOP)
        #...

    #...

    def submit(self):
        # Download the first results page and cache its soup on self.
        page = ('http://www.sec.gov/cgi-bin/browse-edgar?company=&match=&CIK=' + 
                 self.entryWidget.get().strip() + 
                '&filenum=&State=&Country=&SIC=&owner=exclude' 
                '&Find=Find+Companies&action=getcompany')
        #...
        self.soup = ...

    def next(self):
        # Follow the 'Next 40' link found in the *cached* soup, then
        # replace the cache with the newly downloaded page's soup so the
        # next press advances another page.
        #...
        #there must be a better way than this to extract the onclick value
        #but I don't use/know BeautifulSoup to help with this part

        npar = str(self.soup.find(value="Next 40"))
        index1 = npar.find('/cgi')
        index2 = npar.find('count=40') + len('count=40')  
        page = 'http://www.sec.gov' + npar[index1:index2]

        sock = urllib.urlopen(page)
        raw = sock.read()
        self.soup = BeautifulSoup(raw)

        #...

if __name__ == '__main__':
    root = Tk()
    root.title("SEC Edgar Search")
    root["padx"] = 10
    root["pady"] = 25

    app = Application(root)

    app.mainloop()
    root.destroy()

For each new page the onClick link updates the &Start parameter. So alternatively you could increment a counter for that in your class without bothering to parse the current soup to get the value.

~没有更多了~
我们使用 Cookies 和其他技术来定制您的体验包括您的登录状态等。通过阅读我们的 隐私政策 了解更多相关信息。 单击 接受 或继续使用网站,即表示您同意使用 Cookies 和您的相关数据。
原文