Error parsing binary files
I am trying to parse PDF files using Apache Tika after upgrading the PDFBox version to 1.6.0, and I have started getting this error for a few PDF files. Any suggestions?
java.io.IOException: expected='endstream' actual='' org.apache.pdfbox.io.PushBackInputStream@3a72d4e5
at org.apache.pdfbox.pdfparser.BaseParser.parseCOSStream(BaseParser.java:439)
at org.apache.pdfbox.pdfparser.PDFParser.parseObject(PDFParser.java:552)
at org.apache.pdfbox.pdfparser.PDFParser.parse(PDFParser.java:184)
at org.apache.pdfbox.pdmodel.PDDocument.load(PDDocument.java:1088)
at org.apache.pdfbox.pdmodel.PDDocument.load(PDDocument.java:1053)
at org.apache.tika.parser.pdf.PDFParser.parse(PDFParser.java:74)
at org.apache.tika.parser.CompositeParser.parse(CompositeParser.java:197)
at org.apache.tika.parser.CompositeParser.parse(CompositeParser.java:197)
at org.apache.tika.parser.AutoDetectParser.parse(AutoDetectParser.java:135)
at org.apache.tika.Tika.parseToString(Tika.java:357)
at edu.uci.ics.crawler4j.crawler.BinaryParser.parse(BinaryParser.java:37)
at edu.uci.ics.crawler4j.crawler.WebCrawler.handleBinary(WebCrawler.java:223)
at edu.uci.ics.crawler4j.crawler.WebCrawler.processPage(WebCrawler.java:461)
at edu.uci.ics.crawler4j.crawler.WebCrawler.run(WebCrawler.java:129)
at java.lang.Thread.run(Thread.java:662)
WARN [Crawler 2] Did not found XRef object at specified startxref position 0
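A single file can be checked outside the crawler with a small harness like the one below; it uses the same parseToString(InputStream, Metadata) overload as the crawler code, and the "suspect.pdf" path is just a placeholder. If a failing PDF parses cleanly here, the file itself is fine and the crawler's download/buffering path is the likely culprit.

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.tika.Tika;
import org.apache.tika.metadata.Metadata;

// Minimal standalone check (file path is illustrative): parse one
// downloaded PDF with Tika alone, outside the crawler.
public class TikaCheck {
    public static void main(String[] args) throws Exception {
        InputStream is = new FileInputStream(new File("suspect.pdf"));
        try {
            String text = new Tika().parseToString(is, new Metadata());
            System.out.println("Parsed " + text.length() + " chars");
        } finally {
            is.close();
        }
    }
}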
And this is my code.
if (page.isBinary()) {
    handleBinary(page, curURL);
}
-------------------------------------------------------------------------------
public int handleBinary(Page page, WebURL curURL) {
    try {
        binaryParser.parse(page.getBinaryData());
        page.setText(binaryParser.getText());
        handleMetaData(page, binaryParser.getMetaData());
        //System.out.println(" pdf url " + page.getWebURL().getURL());
        //System.out.println("Text" + page.getText());
    } catch (Exception e) {
        // TODO: handle exception
    }
    return PROCESS_OK;
}
public class BinaryParser {

    private String text;
    private Map<String, String> metaData;
    private Tika tika;

    public BinaryParser() {
        tika = new Tika();
    }

    public void parse(byte[] data) {
        InputStream is = null;
        try {
            is = new ByteArrayInputStream(data);
            text = null;
            Metadata md = new Metadata();
            metaData = new HashMap<String, String>();
            text = tika.parseToString(is, md).trim();
            processMetaData(md);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            IOUtils.closeQuietly(is);
        }
    }

    public String getText() {
        return text;
    }

    public void setText(String text) {
        this.text = text;
    }

    private void processMetaData(Metadata md) {
        if ((getMetaData() == null) || (!getMetaData().isEmpty())) {
            setMetaData(new HashMap<String, String>());
        }
        for (String name : md.names()) {
            getMetaData().put(name.toLowerCase(), md.get(name));
        }
    }

    public Map<String, String> getMetaData() {
        return metaData;
    }

    public void setMetaData(Map<String, String> metaData) {
        this.metaData = metaData;
    }
}
public class Page {

    private WebURL url;
    private String html;

    // Data for textual content
    private String text;
    private String title;
    private String keywords;
    private String authors;
    private String description;
    private String contentType;
    private String contentEncoding;

    // Binary data (e.g., image content); it's null for HTML pages
    private byte[] binaryData;

    private List<WebURL> urls;
    private ByteBuffer bBuf;

    private final static String defaultEncoding = Configurations
            .getStringProperty("crawler.default_encoding", "UTF-8");

    public boolean load(final InputStream in, final int totalsize,
            final boolean isBinary) {
        if (totalsize > 0) {
            this.bBuf = ByteBuffer.allocate(totalsize + 1024);
        } else {
            this.bBuf = ByteBuffer.allocate(PageFetcher.MAX_DOWNLOAD_SIZE);
        }
        final byte[] b = new byte[1024];
        int len;
        double finished = 0;
        try {
            while ((len = in.read(b)) != -1) {
                if (finished + b.length > this.bBuf.capacity()) {
                    break;
                }
                this.bBuf.put(b, 0, len);
                finished += len;
            }
        } catch (final BufferOverflowException boe) {
            System.out.println("Page size exceeds maximum allowed.");
            return false;
        } catch (final Exception e) {
            System.err.println(e.getMessage());
            return false;
        }
        this.bBuf.flip();
        if (isBinary) {
            binaryData = new byte[bBuf.limit()];
            bBuf.get(binaryData);
        } else {
            this.html = "";
            this.html += Charset.forName(defaultEncoding).decode(this.bBuf);
            this.bBuf.clear();
            if (this.html.length() == 0) {
                return false;
            }
        }
        return true;
    }

    public boolean isBinary() {
        return binaryData != null;
    }

    public byte[] getBinaryData() {
        return binaryData;
    }
}
1 Comment
Are you sure that you don't accidentally truncate the PDF document when you load it into the binary buffer in the Page class?
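A quick way to verify this is to write the exact byte[] handed to Tika out to disk and check whether it opens in a PDF viewer, and whether its size matches the original document. A minimal sketch; the PdfDump name and the output path are illustrative, not part of crawler4j or Tika:

import java.io.FileOutputStream;
import java.io.IOException;

// Illustrative helper: dump the fetched bytes so the suspect PDF can be
// inspected directly. A short or unopenable file means the data was
// already truncated before Tika/PDFBox ever saw it.
public final class PdfDump {
    public static void dump(byte[] data, String path) throws IOException {
        FileOutputStream out = new FileOutputStream(path);
        try {
            out.write(data);
        } finally {
            out.close();
        }
        System.out.println("Wrote " + data.length + " bytes to " + path);
    }
}

For example, calling PdfDump.dump(page.getBinaryData(), "suspect.pdf") at the top of handleBinary shows exactly what the parser receives.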
There are multiple potential problems in your Page.load() method. To start with, finished + b.length > this.bBuf.capacity() should be finished + len > this.bBuf.capacity(), since the read() method could have returned fewer than b.length bytes. Also, are you sure that the totalsize argument you give is accurate? Finally, it could be that the given document is larger than the MAX_DOWNLOAD_SIZE limit.
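Putting the first and last points together, the read loop could be reshaped along these lines. This is an untested sketch rather than a drop-in patch: the BoundedReader name is invented, and the buffer handling mirrors the bBuf field from the question. The key changes are comparing against len, the number of bytes actually read, and failing loudly when the document does not fit instead of breaking out of the loop with a silently truncated buffer:

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

public final class BoundedReader {

    // Sketch of a corrected read loop for Page.load(). Returns false
    // when the stream does not fit into the buffer, so the caller can
    // skip the page rather than feed a truncated PDF to the parser.
    public static boolean readFully(InputStream in, ByteBuffer buf)
            throws IOException {
        final byte[] b = new byte[1024];
        int len;
        while ((len = in.read(b)) != -1) {
            // Compare against len, the bytes actually read this pass;
            // read() may return fewer than b.length bytes.
            if (buf.position() + len > buf.capacity()) {
                return false; // document exceeds the allowed maximum
            }
            buf.put(b, 0, len);
        }
        return true;
    }
}

A false return here is exactly the case that currently slips through: the original loop breaks out early and still hands the truncated byte[] to Tika, which is consistent with PDFBox running off the end of a stream and reporting expected='endstream' actual=''. And if totalsize is taken from a Content-Length header, it is worth treating it as a hint only, since servers can report it inaccurately or omit it.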