bada 中的 XML 解析失败
我做了一个读取 RSS 新闻源的程序。它对某些网站(例如 ABC News、TechCrunch、Engadget)不起作用。这些网站的响应正文长度超过十万(100,000)字节。我使用 XPath 方法进行解析。也许这个问题是因为我是 bada 新手。我用于 XML 解析的代码如下:
// HTTP body-ready callback: reads the RSS response body, parses it with
// libxml2 and extracts item titles/descriptions/links/pubDates via XPath.
// On any download or parse failure it notifies the FormMgr so the UI can
// return to the "add feed" form.
//
// NOTE(review): bada fires OnTransactionReadyToRead once per received chunk.
// A feed body larger than one chunk (the >100,000-byte feeds that fail)
// arrives over several calls, so xmlParseMemory() only ever sees a truncated
// document here and returns NULL. The durable fix is to accumulate the
// chunks in this callback and run the parsing once from
// OnTransactionCompleted(), which is invoked when the download finishes.
void
addfeed::OnTransactionReadyToRead(HttpSession& httpSession, HttpTransaction& httpTransaction, int availableBodyLen)
{
    pFrame = Osp::App::Application::GetInstance()->GetAppFrame()->GetFrame();
    FormMgr* pFormMgr = dynamic_cast<FormMgr*> (pFrame->GetControl("FormMgr"));
    bool notifyFailure = false;  // set on any download/parse error

    AppLog("####### OnTransactionReadyToRead! #######");
    HttpResponse* pHttpResponse = httpTransaction.GetResponse();
    if (pHttpResponse->GetStatusCode() == NET_HTTP_STATUS_OK)
    {
        AppLog("%d", availableBodyLen);
        HttpHeader* pHttpHeader = pHttpResponse->GetHeader();
        // The *N() methods transfer ownership; both are released on every
        // path below (the original leaked them whenever parsing failed).
        String* tempHeaderString = pHttpHeader->GetRawHeaderN();
        ByteBuffer* pBuffer = pHttpResponse->ReadBodyN();

        // Best-effort debug dump; the buffer is not guaranteed to be
        // NUL-terminated, so the String copy may read past the payload.
        String str((const char*)pBuffer->GetPointer());
        AppLog("Response: %s", str.GetPointer());
        AppLog("Limit: %d", pBuffer->GetLimit());

        // Typical failure point for chunked feeds (see note above): a
        // partial body is not well-formed XML.
        xmlDoc* doc = xmlParseMemory((const char*)pBuffer->GetPointer(), pBuffer->GetLimit());
        if (doc == NULL)
        {
            notifyFailure = true;
            AppLog("Failed to load xml doc!");
        }
        else
        {
            AppLog("InTo XML Parsing");
            /* Create xpath evaluation context */
            xmlXPathContextPtr xpathCtx = xmlXPathNewContext(doc);
            if (xpathCtx == NULL)
            {
                // The original fell through here and dereferenced the null
                // context; bail out of the parse instead.
                notifyFailure = true;
                AppLog("Error: unable to create new XPath context");
            }
            else
            {
                /* Evaluate the four feed queries; each result is owned and
                   must be released with xmlXPathFreeObject(). */
                xmlXPathObjectPtr titleObj       = xmlXPathEvalExpression((xmlChar*)"//item/title", xpathCtx);
                xmlXPathObjectPtr descriptionObj = xmlXPathEvalExpression((xmlChar*)"//item/description", xpathCtx);
                xmlXPathObjectPtr linkObj        = xmlXPathEvalExpression((xmlChar*)"//item/link", xpathCtx);
                xmlXPathObjectPtr pubdateObj     = xmlXPathEvalExpression((xmlChar*)"//item/pubDate", xpathCtx);

                if (titleObj != NULL && descriptionObj != NULL && linkObj != NULL && pubdateObj != NULL)
                {
                    get_xpath_titles1(titleObj->nodesetval);
                    get_xpath_description1(descriptionObj->nodesetval);
                    get_xpath_link1(linkObj->nodesetval);
                    get_xpath_pubdate1(pubdateObj->nodesetval);
                }
                else
                {
                    notifyFailure = true;
                    AppLog("Error: unable to evaluate xpath expression");
                }

                // Release XPath results and context (leaked in the original).
                if (titleObj != NULL)       xmlXPathFreeObject(titleObj);
                if (descriptionObj != NULL) xmlXPathFreeObject(descriptionObj);
                if (linkObj != NULL)        xmlXPathFreeObject(linkObj);
                if (pubdateObj != NULL)     xmlXPathFreeObject(pubdateObj);
                xmlXPathFreeContext(xpathCtx);
            }
            xmlFreeDoc(doc);
        }
        // NOTE(review): xmlCleanupParser() tears down libxml2 global state
        // and is meant for process shutdown, not per-transaction use.
        xmlCleanupParser();

        delete tempHeaderString;
        delete pBuffer;
    }
    else
    {
        notifyFailure = true;
    }

    if (notifyFailure)
    {
        pFormMgr->SendUserEvent(FormMgr::REQUEST_ID_ADD_FEED_FORM, null);
    }
}
I made a program that reads RSS news feeds. It is not working for some websites, such as ABC News, TechCrunch, and Engadget. These websites have response bodies longer than one lakh (100,000) bytes. I use the XPath method for parsing. Perhaps this problem arises because I am new to bada. The code I am using for XML parsing is:
// Second, identical listing of the callback as posted by the asker.
// bada invokes this handler whenever a chunk of response body is readable,
// so for large feeds it can run several times per transaction — which is
// why xmlParseMemory() below fails on feeds over ~100,000 bytes: it is
// handed a truncated document.
void
addfeed::OnTransactionReadyToRead(HttpSession& httpSession, HttpTransaction& httpTransaction, int availableBodyLen)
{
// Locate the form manager used to report failures at the end.
pFrame = Osp::App::Application::GetInstance()->GetAppFrame()->GetFrame();
FormMgr* pFormMgr = dynamic_cast<FormMgr*> (pFrame->GetControl("FormMgr"));
// Set to 1 when the download or the XML parse fails.
bool flag1=0;
AppLog("####### OnTransactionReadyToRead! #######");
HttpResponse* pHttpResponse = httpTransaction.GetResponse();
if (pHttpResponse->GetStatusCode() == NET_HTTP_STATUS_OK)
{
AppLog("%d",availableBodyLen);
HttpHeader* pHttpHeader = pHttpResponse->GetHeader();
// NOTE(review): the *N() methods transfer ownership; both objects are
// deleted only on the success path below, so every failure path leaks them.
String* tempHeaderString = pHttpHeader->GetRawHeaderN();
ByteBuffer* pBuffer = pHttpResponse->ReadBodyN();
String str((const char*)pBuffer->GetPointer());
AppLog("Response: %s",str.GetPointer());
int limit(pBuffer->GetLimit());
AppLog("Limit: %d",limit);
xmlDoc * doc = NULL;
xmlXPathContextPtr xpathCtx;
xmlXPathObjectPtr xpathtitle;
xmlXPathObjectPtr descriptionObj;
xmlXPathObjectPtr linkObj;
xmlXPathObjectPtr pubdateObj;
// Parse whatever portion of the body has arrived so far; a partial body
// is not well-formed XML, hence the NULL result the asker reports.
doc = xmlParseMemory((const char*)pBuffer->GetPointer(),pBuffer->GetLimit());// Here itself my code fails
if(doc==NULL)
{
flag1=1;
AppLog("Failed to load xml doc!");
// xmlFreeDoc(NULL) is a no-op here; cleanup of the parser globals only.
xmlFreeDoc(doc);
xmlCleanupParser();
}
else
{
AppLog("InTo XML Parsing");
/* Create xpath evaluation context */
xpathCtx = xmlXPathNewContext(doc);
if(xpathCtx == NULL)
{
AppLog("Error: unable to create new XPath context");
// NOTE(review): no return after freeing — execution continues and the
// null context is passed to xmlXPathEvalExpression below.
xmlFreeDoc(doc);
}
/* Evaluate xpath expression */
xpathtitle = xmlXPathEvalExpression((xmlChar*)"//item/title", xpathCtx);
if(xpathtitle == NULL)
{
AppLog("Error: unable to evaluate xpath expression");
// NOTE(review): same fall-through — doc/xpathCtx are freed but then
// used again by the following evaluations and node-set handlers.
xmlXPathFreeContext(xpathCtx);
xmlFreeDoc(doc);
}
descriptionObj = xmlXPathEvalExpression((xmlChar*)"//item/description",xpathCtx);
if(descriptionObj == NULL)
{
AppLog("Error: unable to evaluate xpath expression");
xmlXPathFreeContext(xpathCtx);
xmlFreeDoc(doc);
}
linkObj = xmlXPathEvalExpression((xmlChar*)"//item/link",xpathCtx);
if(linkObj==NULL)
{
AppLog("Error: unable to evaluate xpath expression");
xmlXPathFreeContext(xpathCtx);
xmlFreeDoc(doc);
}
pubdateObj=xmlXPathEvalExpression((xmlChar*)"//item/pubDate",xpathCtx);
if(pubdateObj==NULL)
{
AppLog("Error: unable to evaluate xpath expression");
xmlXPathFreeContext(xpathCtx);
xmlFreeDoc(doc);
}
// Hand each XPath node set to the feed-specific extraction helpers.
// NOTE(review): the four xmlXPathObjectPtr results are never released
// with xmlXPathFreeObject(), and xpathCtx is never freed on this path.
get_xpath_titles1(xpathtitle->nodesetval);
get_xpath_description1(descriptionObj->nodesetval);
get_xpath_link1(linkObj->nodesetval);
get_xpath_pubdate1(pubdateObj->nodesetval);
xmlFreeDoc(doc);
xmlCleanupParser();
delete tempHeaderString;
delete pBuffer;
}
}
else
{
// Non-200 status: flag the failure so the UI is notified below.
flag1=1;
}
if(flag1==1)
{
// Send the user back to the "add feed" form on failure.
pFormMgr->SendUserEvent(FormMgr::REQUEST_ID_ADD_FEED_FORM,null);
}
}
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(1)
我和你遇到了同样的问题。这不是解析问题,而是下载问题。
下载大文件(在您的情况下是大于十万字节的文件)时,
OnTransactionReadyToRead 会被多次调用,每次只收到一部分数据。
您需要做的是把整个解析代码移到 OnTransactionCompleted 中,
它只会在文件下载完成时被调用一次。
我还建议您先把 XML 保存到文件中。
I had the same problem as you. It is not a parsing problem, rather a download problem.
When downloading large files — in your case larger than one lakh (100,000) bytes —
OnTransactionReadyToRead is called more than once, each time with only part of the data.
What you need to do is move the whole parsing code to OnTransactionCompleted,
which is called only once, when the file download finishes.
I would also suggest that you first save the XML to a file.