Could it be that Hadoop 0.20.0's DBInputFormat doesn't support Oracle? I'm hoping one of the experts here can help. Here is my code:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Iterator;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBInputFormat;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;
import org.apache.hadoop.mapred.lib.db.DBWritable;
public class WordCountDB {
    private static final String DRIVER_CLASS = "oracle.jdbc.driver.OracleDriver"; // Oracle thin JDBC driver class
    private static final String DB_URL = "jdbc:oracle:thin:@127.0.0.1:1521:BUS"; // thin-driver URL in host:port:SID form
    private static final String DB_USER = "manage_bus"; // database user
    private static final String DB_PASSWD = "its312"; // password
    private static final String[] FieldNames = {"name", "age"};
    private static Connection connection; // database connection (currently unused)
    public static boolean initialized = false; // tracks whether the connection is set up (currently unused)
    public static class TokenizerMapper extends MapReduceBase implements
            Mapper<LongWritable, TeacherRecord, Text, DoubleWritable> {
        @Override
        public void map(LongWritable key, TeacherRecord value,
                OutputCollector<Text, DoubleWritable> output, Reporter reporter)
                throws IOException {
            System.out.println("entering the map function");
            output.collect(new Text(value.name), new DoubleWritable(value.age));
        }
    }
    public static class IntSumReducer extends MapReduceBase implements
            Reducer<Text, DoubleWritable, TeacherRecord, NullWritable> {
        NullWritable n = NullWritable.get();
        @Override
        public void reduce(Text key, Iterator<DoubleWritable> values,
                OutputCollector<TeacherRecord, NullWritable> output, Reporter reporter)
                throws IOException {
            System.out.println("entering the reduce function");
            double sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(new TeacherRecord(key.toString(), sum), n);
        }
    }
    static class TeacherRecord implements Writable, DBWritable {
        String name;
        double age;
        // A no-arg constructor is required: Hadoop instantiates this class
        // reflectively when reading records from the database.
        public TeacherRecord() {
        }
        public TeacherRecord(String m_name, double m_age) {
            this.name = m_name;
            this.age = m_age;
        }
        @Override
        public void readFields(DataInput in) throws IOException {
            this.name = Text.readString(in);
            this.age = in.readDouble();
        }
        @Override
        public void write(DataOutput out) throws IOException {
            Text.writeString(out, this.name);
            out.writeDouble(age);
        }
        @Override
        public void readFields(ResultSet resultSet) throws SQLException {
            this.name = resultSet.getString(1);
            this.age = resultSet.getDouble(2);
        }
        @Override
        public void write(PreparedStatement statement) throws SQLException {
            statement.setString(1, this.name);
            statement.setDouble(2, this.age);
        }
    }
    public static void main(String[] args) throws Exception {
        String driverClassName = DRIVER_CLASS;
        String url = DB_URL;
        String dbuser = DB_USER;
        String dbpw = DB_PASSWD;
        // map-reduce settings
        // JobConf job = new JobConf(getConf(), WordCountDB.class); // ???? not sure what getConf() does here
        JobConf job = new JobConf(WordCountDB.class);
        job.setJobName("Count from DB");
        job.setMapperClass(TokenizerMapper.class);
        // Note: IntSumReducer cannot serve as a combiner here, because a combiner's
        // output types must match the map output types (Text, DoubleWritable),
        // while IntSumReducer emits (TeacherRecord, NullWritable).
        // job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setInputFormat(DBInputFormat.class);
        DBConfiguration.configureDB(job, driverClassName, url, dbuser, dbpw); // connect to the database
        DBInputFormat.setInput(job, TeacherRecord.class, "teacher", null, "name", FieldNames);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);
        DBOutputFormat.setOutput(job, "teacher", FieldNames);
        job.setOutputKeyClass(TeacherRecord.class);
        job.setOutputValueClass(NullWritable.class);
        try {
            JobClient.runJob(job);
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("job execution failed");
        }
    }
}
When I run it, I get the following error:
java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.JobConf.getInputFormat(JobConf.java:400)
at org.apache.hadoop.mapred.JobClient.writeOldSplits(JobClient.java:810)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:781)
at org.apache.hadoop.mapred.JobClient.submitJob(JobClient.java:730)
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1249)
at WordCountDB.main(WordCountDB.java:205)
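In case it helps narrow things down, here is a minimal standalone sanity check, separate from the job, to confirm whether the driver class and connection URL work at all outside Hadoop; an "Error in configuring object" at submission time often wraps a ClassNotFoundException for the JDBC driver. This is just a sketch of mine (OracleConnCheck is a throwaway name, and it reuses the same constants as WordCountDB above); run it with the Oracle driver jar on the classpath:

import java.sql.Connection;
import java.sql.DriverManager;

// Standalone JDBC sanity check, independent of the MapReduce job.
public class OracleConnCheck {
    public static void main(String[] args) throws Exception {
        // Throws ClassNotFoundException if the ojdbc jar is missing from the classpath.
        Class.forName("oracle.jdbc.driver.OracleDriver");
        Connection conn = DriverManager.getConnection(
                "jdbc:oracle:thin:@127.0.0.1:1521:BUS", "manage_bus", "its312");
        System.out.println("connected: " + !conn.isClosed());
        conn.close();
    }
}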
I've been stuck on this for ages and hope someone can help out. Thank you!
------ Solution --------------------------------------------
Brother, you're playing with deep stuff; you've even got Map/Reduce up and running. I also work on cloud-computing-related business, but I've never touched actual Map/Reduce programming, so I can't help you. Bumping the thread for you.
------ Solution --------------------------------------------
For the database I'd recommend HBase or Hive.
------ For reference only ---------------------------------------
But the business requirement puts the data in Oracle, so there's no way around it.
I'm only a fourth-year student working under a senior postdoc, so I don't know much yet.
------ For reference only ---------------------------------------
This problem has been solved: under Cygwin, add CLASSPATH and HADOOP_CLASSPATH environment variables pointing at HADOOP/LIB, then copy all of Oracle's libraries into HADOOP/LIB. But then another problem appears: Error reading task output http://chengzhf:50060/tasklog?plaintext=true&taskid=attempt_201108201645_0004_m_000001_0&filter=stdout
And the job takes a very long time to finish.
Still begging for an expert to appear...
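For reference, instead of copying the whole Oracle library into HADOOP/LIB on every node, the driver jar can also be shipped with the job through the distributed cache. A minimal sketch, assuming the jar has already been uploaded to HDFS at /lib/ojdbc14.jar (both the path and the jar name are hypothetical; ShipDriverJar is just an illustrative helper):

import java.io.IOException;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class ShipDriverJar {
    // Call this on the JobConf before JobClient.runJob(job).
    public static void addOracleDriver(JobConf job) throws IOException {
        // /lib/ojdbc14.jar is a hypothetical HDFS path; upload the driver
        // jar there first, e.g. hadoop fs -put ojdbc14.jar /lib/
        DistributedCache.addFileToClassPath(new Path("/lib/ojdbc14.jar"), job);
    }
}

Note that this only puts the jar on the task-side classpath; the client that submits the job still needs the driver on its own classpath, since DBInputFormat opens a connection during job submission.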
------ For reference only ---------------------------------------
I have the same question!!!
------ For reference only ---------------------------------------
I really want to know how to solve this problem.
------ For reference only ---------------------------------------
My Nutch 1.6 also reports this error. What causes it? Thank you.
From the Hadoop log files:
java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:432)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:372)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
... 5 more
Caused by: java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapRunner.configure(MapRunner.java:34)
... 10 more
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
... 13 more
Caused by: java.lang.RuntimeException: x point org.apache.nutch.net.URLNormalizer not found.
at org.apache.nutch.net.URLNormalizers.<init>
at org.apache.nutch.crawl.Injector$InjectMapper.configure(Injector.java:74)
... 18 more
The console just reports that the job failed.
I am also getting the same error. Someone please help!