How to fix java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO (applies to Hadoop 3.1.3)

When this java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO error comes up, the usual fix of adding a NativeIO.java file to the project did not work for me and introduced new bugs of its own. A search on GitHub turned up a simple, effective alternative: download winutils.zip, unzip it, and copy the files to the right place. That solved the problem.


Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z
	at org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Native Method)
	at org.apache.hadoop.io.nativeio.NativeIO$Windows.access(NativeIO.java:609)
	at org.apache.hadoop.fs.FileUtil.canRead(FileUtil.java:977)
	at org.apache.hadoop.util.DiskChecker.checkAccessByFileMethods(DiskChecker.java:187)
	at org.apache.hadoop.util.DiskChecker.checkDirAccess(DiskChecker.java:174)
	at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:108)
	at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.confChanged(LocalDirAllocator.java:285)
	at org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.getLocalPathForWrite(LocalDirAllocator.java:344)
	at org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite(LocalDirAllocator.java:150)
	at org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite(LocalDirAllocator.java:131)
	at org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite(LocalDirAllocator.java:115)
	at org.apache.hadoop.mapred.LocalDistributedCacheManager.setup(LocalDistributedCacheManager.java:125)
	at org.apache.hadoop.mapred.LocalJobRunner$Job.<init>(LocalJobRunner.java:163)
	at org.apache.hadoop.mapred.LocalJobRunner.submitJob(LocalJobRunner.java:731)
	at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:240)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
	at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
	at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
	at com.qs.WordcountDriver.main(WordcountDriver.java:44)
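
This error means the JVM failed to bind NativeIO$Windows.access0 to a native implementation, which on Windows is supplied by hadoop.dll. Before trying any fix, you can confirm that the native bindings aren't loading with a quick check. Here is a minimal sketch using Hadoop's own NativeCodeLoader (the D:\hadoop-3.1.3 path is an assumption; point it at your own unpack directory):

import org.apache.hadoop.util.NativeCodeLoader;

public class NativeCheck {
	public static void main(String[] args) {
		// Point Hadoop at a local directory whose bin\ folder is supposed to
		// hold winutils.exe and hadoop.dll (assumed path; adjust to your machine).
		System.setProperty("hadoop.home.dir", "D:\\hadoop-3.1.3");
		// Prints false while hadoop.dll cannot be loaded, which is exactly the
		// state that produces the UnsatisfiedLinkError above.
		System.out.println("Native code loaded: " + NativeCodeLoader.isNativeCodeLoaded());
	}
}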

When I first hit this bug I was completely lost QAQ, but after searching online and finding that plenty of other people were just as lost, I felt a little better (●'◡'●) and started looking for a solution. The first thing I tried was the fix most widely circulated online:

1. Write a NativeIO.java file inside your own project.

Create the package org.apache.hadoop.io.nativeio under the project's source root and add a NativeIO class there. Because the package and class name match Hadoop's own, this copy shadows the one inside the Hadoop jar on the classpath:

The NativeIO code is as follows:

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io.nativeio;

import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import sun.misc.Unsafe;

import com.google.common.annotations.VisibleForTesting;

/**
 * JNI wrappers for various native IO-related calls not available in Java. These
 * functions should generally be used alongside a fallback to another more
 * portable mechanism.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NativeIO {
	public static class POSIX {
		// Flags for open() call from bits/fcntl.h
		public static final int O_RDONLY = 00;
		public static final int O_WRONLY = 01;
		public static final int O_RDWR = 02;
		public static final int O_CREAT = 0100;
		public static final int O_EXCL = 0200;
		public static final int O_NOCTTY = 0400;
		public static final int O_TRUNC = 01000;
		public static final int O_APPEND = 02000;
		public static final int O_NONBLOCK = 04000;
		public static final int O_SYNC = 010000;
		public static final int O_ASYNC = 020000;
		public static final int O_FSYNC = O_SYNC;
		public static final int O_NDELAY = O_NONBLOCK;

		// Flags for posix_fadvise() from bits/fcntl.h
		/* No further special treatment. */
		public static final int POSIX_FADV_NORMAL = 0;
		/* Expect random page references. */
		public static final int POSIX_FADV_RANDOM = 1;
		/* Expect sequential page references. */
		public static final int POSIX_FADV_SEQUENTIAL = 2;
		/* Will need these pages. */
		public static final int POSIX_FADV_WILLNEED = 3;
		/* Don't need these pages. */
		public static final int POSIX_FADV_DONTNEED = 4;
		/* Data will be accessed once. */
		public static final int POSIX_FADV_NOREUSE = 5;

		/*
		 * Wait upon writeout of all pages in the range before performing the write.
		 */
		public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
		/*
		 * Initiate writeout of all those dirty pages in the range which are not
		 * presently under writeback.
		 */
		public static final int SYNC_FILE_RANGE_WRITE = 2;

		/*
		 * Wait upon writeout of all pages in the range after performing the write.
		 */
		public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;

		private static final Log LOG = LogFactory.getLog(NativeIO.class);

		private static boolean nativeLoaded = false;
		private static boolean fadvisePossible = true;
		private static boolean syncFileRangePossible = true;

		static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY = "hadoop.workaround.non.threadsafe.getpwuid";
		static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = true;

		private static long cacheTimeout = -1;

		private static CacheManipulator cacheManipulator = new CacheManipulator();

		public static CacheManipulator getCacheManipulator() {
			return cacheManipulator;
		}

		public static void setCacheManipulator(CacheManipulator cacheManipulator) {
			POSIX.cacheManipulator = cacheManipulator;
		}

		/**
		 * Used to manipulate the operating system cache.
		 */
		@VisibleForTesting
		public static class CacheManipulator {
			// The original listing is cut off at this point. The version of this
			// workaround that circulates online is the stock NativeIO.java from
			// the Hadoop source tree with Windows.access() patched to simply
			// return true.
		}
	}
}
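
As the summary at the top says, this NativeIO override never actually worked for me; it just traded one bug for another. What finally fixed the problem was the winutils route found on GitHub: download a prebuilt winutils.zip matching Hadoop 3.1.3 (the cdarlint/winutils repository on GitHub is one commonly used source that carries a hadoop-3.1.3/bin folder), unzip it, and copy the files into place:

1. Copy winutils.exe and hadoop.dll from the archive into %HADOOP_HOME%\bin (for example D:\hadoop-3.1.3\bin).
2. Also copy hadoop.dll into C:\Windows\System32, or make sure %HADOOP_HOME%\bin is on PATH, so the JVM can actually find and load the DLL.
3. Restart the IDE (it caches environment variables) and rerun the job.

Rerunning the NativeCheck sketch from above should now print true, and NativeIO$Windows.access0 resolves against the real hadoop.dll instead of throwing UnsatisfiedLinkError.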