更多例子


从MySQL里的persons表读入,过滤 name = 'nick',写出到HDFS

JSON

{ "connector.mysqlin.jdbc.url":"jdbc:mysql://mysqltest:3306/testdb", "connector.mysqlin.jdbc.username":"dbuser", "connector.mysqlin.jdbc.password":"12345678", "connector.mysqlin.table.name":"persons",

"connector.hdfsout.output.path":"hdfs://master:9000/user/foo/test1", 

"sparkjob.input.connectors":"com.exceeddata.ac.connector.mysql.MySQLInputConnector?id=mysqlin&output=data1",    
"sparkjob.analytics.operations":"Filter?input=data1&output=data2(expression = `name = 'nick'`)",
"sparkjob.output.connectors":"com.exceeddata.ac.connector.hdfs.HDFSOutputConnector?id=hdfsout&input=data2"

}

从MySQL里的persons表读入,Join HDFS里的enrollment文件夹,写出到HDFS

JSON

{ "connector.mysqlin.jdbc.url":"jdbc:mysql://mysqltest:3306/testdb", "connector.mysqlin.jdbc.username":"dbuser", "connector.mysqlin.jdbc.password":"12345678", "connector.mysqlin.table.name":"persons",

"connector.hdfsin.input.path":"hdfs://master:9000/user/root/tmp/enrollment",  
"connector.hdfsout.output.path":"hdfs://master:9000/user/root/tmp/test2",      

"sparkjob.input.connectors":"com.exceeddata.ac.connector.mysql.MySQLInputConnector?id=mysqlin&output=data1;com.exceeddata.ac.connector.hdfs.HDFSInputConnector?id=hdfsin&output=data2",    
"sparkjob.analytics.operations":"ReduceJoin?input1=data1&input2=data2&output=data3(expressions = `pid`, left = `sid`, right = `pid`)",
"sparkjob.output.connectors":"com.exceeddata.ac.connector.hdfs.HDFSOutputConnector?id=hdfsout&input=data3"

}