
Spark HBase connection issue

I am hitting the following error when I try to connect to HBase from Spark (using newAPIHadoopRDD) on HDP 2.4.2. I have already tried increasing the RPC timeout in the HBase XML config, but I still get the same error. Any idea how to fix this?

Exception in thread "main" org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=36, exceptions: 
Wed Nov 16 14:59:36 IST 2016, null, java.net.SocketTimeoutException: callTimeout=60000, callDuration=71216: row 'scores,,00000000000000' on table 'hbase:meta' at region=hbase:meta,,1.1588230740, hostname=hklvadcnc06.hk.standardchartered.com,16020,1478491683763, seqNum=0 

    at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.throwEnrichedException(RpcRetryingCallerWithReadReplicas.java:271) 
    at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:195) 
    at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:59) 
    at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:200) 
    at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:320) 
    at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:295) 
    at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:160) 
    at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:155) 
    at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:821) 
    at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:193) 
    at org.apache.hadoop.hbase.client.MetaScanner.metaScan(MetaScanner.java:89) 
    at org.apache.hadoop.hbase.client.MetaScanner.allTableRegions(MetaScanner.java:324) 
    at org.apache.hadoop.hbase.client.HRegionLocator.getAllRegionLocations(HRegionLocator.java:88) 
    at org.apache.hadoop.hbase.util.RegionSizeCalculator.init(RegionSizeCalculator.java:94) 
    at org.apache.hadoop.hbase.util.RegionSizeCalculator.<init>(RegionSizeCalculator.java:81) 
    at org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.java:256) 
    at org.apache.hadoop.hbase.mapreduce.TableInputFormat.getSplits(TableInputFormat.java:237) 
    at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:120) 
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239) 
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237) 
    at scala.Option.getOrElse(Option.scala:120) 
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237) 
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1929) 
    at org.apache.spark.rdd.RDD.count(RDD.scala:1157) 
    at scb.Hbasetest$.main(Hbasetest.scala:85) 
    at scb.Hbasetest.main(Hbasetest.scala) 
Caused by: java.net.SocketTimeoutException: callTimeout=60000, callDuration=71216: row 'scores,,00000000000000' on table 'hbase:meta' at region=hbase:meta,,1.1588230740, hostname=hklvadcnc06.hk.standardchartered.com,16020,1478491683763, seqNum=0 
     at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:159) 
     at org.apache.hadoop.hbase.client.ResultBoundedCompletionService$QueueingFuture.run(ResultBoundedCompletionService.java:64) 
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
     at java.lang.Thread.run(Thread.java:745) 
Caused by: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Call to hklvadcnc06.hk.standardchartered.com/10.20.235.13:16020 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Connection to hklvadcnc06.hk.standardchartered.com/10.20.235.13:16020 is closing. Call id=9, waitTime=171 
     at org.apache.hadoop.hbase.ipc.RpcClientImpl.wrapException(RpcClientImpl.java:1281) 
     at org.apache.hadoop.hbase.ipc.RpcClientImpl.call(RpcClientImpl.java:1252) 
     at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:213) 
     at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:287) 
     at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$BlockingStub.scan(ClientProtos.java:32651) 
     at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:372) 
     at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:199) 
     at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:62) 
     at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:200) 
     at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas$RetryingRPC.call(ScannerCallableWithReplicas.java:346) 
     at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas$RetryingRPC.call(ScannerCallableWithReplicas.java:320) 
     at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:126) 
     ... 4 more 
Caused by: org.apache.hadoop.hbase.exceptions.ConnectionClosingException: Connection to hklvadcnc06.hk.standardchartered.com/10.20.235.13:16020 is closing. Call id=9, waitTime=171 
     at org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection.cleanupCalls(RpcClientImpl.java:1078) 
     at org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection.close(RpcClientImpl.java:879) 
     at org.apache.hadoop.hbase.ipc.RpcClientImpl$Connection.run(RpcClientImpl.java:604) 
16/11/16 14:59:36 INFO SparkContext: Invoking stop() from shutdown hook 
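
For context, a minimal sketch of the kind of job that hits this code path. The table name 'scores' and the failing count() come from the trace; everything else is an assumption, not the original Hbasetest.scala:

    import org.apache.hadoop.hbase.HBaseConfiguration
    import org.apache.hadoop.hbase.client.Result
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat
    import org.apache.spark.{SparkConf, SparkContext}

    object HbaseScanSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(new SparkConf().setAppName("HbaseScanSketch"))

        // If hbase-site.xml is not on the driver classpath, create() falls back
        // to defaults, and the hbase:meta scan inside getSplits() eventually
        // times out with a RetriesExhaustedException like the one above.
        val hbaseConf = HBaseConfiguration.create()
        hbaseConf.set(TableInputFormat.INPUT_TABLE, "scores")

        val rdd = sc.newAPIHadoopRDD(
          hbaseConf,
          classOf[TableInputFormat],
          classOf[ImmutableBytesWritable],
          classOf[Result])

        println(rdd.count())  // fails here while computing the input splits
        sc.stop()
      }
    }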

Smells like the HBase client lib cannot find its hbase-site.xml and is guessing where ZK is, and missing. Make sure the driver extraClassPath contains the *directory* /etc/hbase/conf (see the sketch after these comments).


I tried adding hbase/conf as you mentioned ... it still doesn't work.
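
Referring to the first comment above, a sketch of where that setting usually goes, assuming the HDP default conf directory /etc/hbase/conf:

    # as a spark-submit flag
    spark-submit --driver-class-path /etc/hbase/conf --class scb.Hbasetest <app-jar>

    # or persistently in spark-defaults.conf
    spark.driver.extraClassPath /etc/hbase/conf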

Answer


I added the HBase conf path to the Hadoop classpath, and the problem was resolved.
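
For anyone hitting the same thing, a sketch of that change, assuming the HDP default conf directory /etc/hbase/conf and that the classpath is picked up from the environment the job is launched in:

    # e.g. in hadoop-env.sh, or exported before launching the job
    export HADOOP_CLASSPATH="$HADOOP_CLASSPATH:/etc/hbase/conf"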

Thanks!