diff --git a/cmake/modules/Findhdfs.cmake b/cmake/modules/Findhdfs.cmake
deleted file mode 100644
index 8245aefe7ac1adf77dba1e1e9584fc75194e3c47..0000000000000000000000000000000000000000
--- a/cmake/modules/Findhdfs.cmake
+++ /dev/null
@@ -1,21 +0,0 @@
-# - Locate hdfs (from hadoop) library
-# Defines:
-#
-#  HDFS_FOUND
-#  HDFS_INCLUDE_DIR
-#  HDFS_INCLUDE_DIRS (not cached)
-#  HDFS_LIBRARIES
-
-find_path(HDFS_INCLUDE_DIR NAMES hdfs.h  HINTS ${HDFS_DIR}/include $ENV{HDFS_DIR}/include /usr/include/hadoop)
-find_library(HDFS_LIBRARY NAMES hdfs PATH_SUFFIXES native HINTS ${HDFS_DIR}/lib $ENV{HDFS_DIR}/lib)
-
-set(HDFS_INCLUDE_DIRS ${HDFS_INCLUDE_DIR})
-set(HDFS_LIBRARIES ${HDFS_LIBRARY})
-
-
-# handle the QUIETLY and REQUIRED arguments and set HDFS_FOUND to TRUE if
-# all listed variables are TRUE
-INCLUDE(FindPackageHandleStandardArgs)
-FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDFS DEFAULT_MSG HDFS_INCLUDE_DIR HDFS_LIBRARY)
-
-mark_as_advanced(HDFS_FOUND HDFS_INCLUDE_DIR HDFS_LIBRARY)
diff --git a/cmake/modules/RootBuildOptions.cmake b/cmake/modules/RootBuildOptions.cmake
index e1719c4486fbf1932b732dfe1d4f74a20541b23e..b0129c05890088104d6aa25c0fe69bb906c378df 100644
--- a/cmake/modules/RootBuildOptions.cmake
+++ b/cmake/modules/RootBuildOptions.cmake
@@ -118,7 +118,6 @@ ROOT_BUILD_OPTION(gfal ON "Enable support for GFAL (Grid File Access Library)")
 ROOT_BUILD_OPTION(globus OFF "Enable support for Globus Auth")
 ROOT_BUILD_OPTION(gnuinstall OFF "Perform installation following the GNU guidelines")
 ROOT_BUILD_OPTION(gsl_shared OFF "Enable linking against shared libraries for GSL (default no)")
-ROOT_BUILD_OPTION(hdfs OFF "Enable support for Hadoop Distributed File System (HDFS)")
 ROOT_BUILD_OPTION(http ON "Enable support for HTTP server")
 ROOT_BUILD_OPTION(imt ON "Enable support for implicit multi-threading via Intel® Thread Building Blocks (TBB)")
 ROOT_BUILD_OPTION(jemalloc OFF "Use jemalloc memory allocator")
@@ -235,7 +234,6 @@ if(all)
  set(fitsio_defvalue ON)
  set(fortran_defvalue ON)
  set(gdml_defvalue ON)
- set(hdfs_defvalue ON)
  set(http_defvalue ON)
  set(krb5_defvalue ON)
  set(ldap_defvalue ON)
@@ -317,14 +315,14 @@ endif()
 ROOT_APPLY_OPTIONS()
 
 #---Removed options------------------------------------------------------------
-foreach(opt afdsmgrd afs bonjour castor chirp geocad glite gviz ios qt qtgsi rfio ruby sapdb srp table)
+foreach(opt afdsmgrd afs bonjour castor chirp geocad glite gviz hdfs ios qt qtgsi rfio ruby sapdb srp table)
   if(${opt})
     message(FATAL_ERROR ">>> Option '${opt}' has been removed in ROOT v6.16.")
   endif()
 endforeach()
 
 #---Deprecated options---------------------------------------------------------
-foreach(opt globus hdfs krb5 ldap memstat odbc)
+foreach(opt globus krb5 ldap memstat odbc)
   if(${opt})
     message(DEPRECATION ">>> Option '${opt}' is deprecated and will be removed in ROOT v6.18. Please inform rootdev@cern.ch should you still need it.")
   endif()
diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
index e60c4065ca331bd201dc5f56bea187b27fe25abc..fe26acd823b1c70492a7d8729d9287606cf2dd33 100644
--- a/cmake/modules/RootConfiguration.cmake
+++ b/cmake/modules/RootConfiguration.cmake
@@ -227,11 +227,6 @@ set(gfalincdir ${GFAL_INCLUDE_DIR})
 
 set(buildmemstat ${value${memstat}})
 
-set(buildhdfs ${value${hdfs}})
-set(hdfslibdir ${HDFS_LIBRARY_DIR})
-set(hdfslib ${HDFS_LIBRARY})
-set(hdfsincdir ${HDFS_INCLUDE_DIR})
-
 set(buildalien ${value${alien}})
 set(alienlibdir ${ALIEN_LIBRARY_DIR})
 set(alienlib ${ALIEN_LIBRARY})
diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake
index 0f8b80fc9881b2f566a4740614ef3c9263ccab3d..3049ac71c08ed74aa667bdff69aa1744f181c381 100644
--- a/cmake/modules/SearchInstalledSoftware.cmake
+++ b/cmake/modules/SearchInstalledSoftware.cmake
@@ -1011,21 +1011,6 @@ if(r)
   endif()
 endif()
 
-
-#---Check for hdfs--------------------------------------------------------------------
-if(hdfs)
-  find_package(hdfs)
-  if(NOT HDFS_FOUND)
-    if(fail-on-missing)
-      message(FATAL_ERROR "hdfs library not found and is required (hdfs option enabled)")
-    else()
-      message(STATUS "hdfs library not found. Set variable HDFS_DIR to point to your hdfs installation")
-      message(STATUS "For the time being switching OFF 'hdfs' option")
-      set(hdfs OFF CACHE BOOL "Disabled because hdfs not found (${hdfs_description})" FORCE)
-    endif()
-  endif()
-endif()
-
 #---Check for Davix library-----------------------------------------------------------
 
 foreach(suffix FOUND INCLUDE_DIR INCLUDE_DIRS LIBRARY LIBRARIES)
diff --git a/config/Makefile.in b/config/Makefile.in
index e59ccb1978ebfb323015ddd71cf8fbed80861921..8c34f7a8a8a0e882c6bf191007065ee5badafaa0 100644
--- a/config/Makefile.in
+++ b/config/Makefile.in
@@ -219,11 +219,6 @@ GLIB2INCDIR    := $(filter-out /usr/include, @glib2incdir@)
 
 BUILDMEMSTAT   := @buildmemstat@
 
-BUILDHDFS      := @buildhdfs@
-HDFSLIBDIR     := @hdfslibdir@
-HDFSCLILIB     := @hdfslib@
-HDFSINCDIR     := $(filter-out /usr/include, @hdfsincdir@)
-
 BUILDALIEN     := @buildalien@
 ALIENLIBDIR    := @alienlibdir@
 ALIENCLILIB    := @alienlib@
diff --git a/documentation/doxygen/Doxyfile b/documentation/doxygen/Doxyfile
index ffd8cb135356063bd61b21f3b44e38d76baab6a7..ae5fc78a6b9921d836bd32c95119f7dd7c1df7c2 100644
--- a/documentation/doxygen/Doxyfile
+++ b/documentation/doxygen/Doxyfile
@@ -833,7 +833,6 @@ INPUT                  = . \
                          ../../html/ \
                          ../../io/dcache/ \
                          ../../io/gfal/ \
-                         ../../io/hdfs/ \
                          ../../io/io/ \
                          ../../io/sql/ \
                          ../../io/xml/ \
diff --git a/etc/plugins/TFile/P110_THDFSFile.C b/etc/plugins/TFile/P110_THDFSFile.C
deleted file mode 100644
index dc657e7f2d01e4f9ca37da49910009278c5d95c7..0000000000000000000000000000000000000000
--- a/etc/plugins/TFile/P110_THDFSFile.C
+++ /dev/null
@@ -1,5 +0,0 @@
-void P110_THDFSFile()
-{
-   gPluginMgr->AddHandler("TFile", "^hdfs:", "THDFSFile",
-      "HDFS", "THDFSFile(const char*,Option_t*,const char*,Int_t)");
-}
diff --git a/etc/plugins/TSystem/P060_THDFSSystem.C b/etc/plugins/TSystem/P060_THDFSSystem.C
deleted file mode 100644
index 88f80606813b8671590ea0732575607a91200bef..0000000000000000000000000000000000000000
--- a/etc/plugins/TSystem/P060_THDFSSystem.C
+++ /dev/null
@@ -1,5 +0,0 @@
-void P060_THDFSSystem()
-{
-   gPluginMgr->AddHandler("TSystem", "^hdfs:", "THDFSSystem",
-      "HDFS", "THDFSSystem()");
-}
diff --git a/io/CMakeLists.txt b/io/CMakeLists.txt
index 94bee5da3142d59bdc9bd2c2a50b9efe3a15075f..bb37dba4311c64cff5ff4e40ae2c05ef5f651c8d 100644
--- a/io/CMakeLists.txt
+++ b/io/CMakeLists.txt
@@ -11,6 +11,3 @@ endif()
 if(dcache)
   add_subdirectory(dcache)
 endif()
-if(hdfs)
-  add_subdirectory(hdfs)
-endif()
diff --git a/io/hdfs/CMakeLists.txt b/io/hdfs/CMakeLists.txt
deleted file mode 100644
index 0d24005da00fa37f18d0fbce31adc2bc0d6aa92d..0000000000000000000000000000000000000000
--- a/io/hdfs/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-############################################################################
-# CMakeLists.txt file for building ROOT io/hdfs package
-############################################################################
-
-include_directories(${HDFS_INCLUDE_DIRS})
-
-add_definitions(-D_FILE_OFFSET_BITS=64)
-
-ROOT_STANDARD_LIBRARY_PACKAGE(HDFS
-                              HEADERS THDFSFile.h
-                              SOURCES src/THDFSFile.cxx
-                              LIBRARIES ${HDFS_LIBRARIES}
-                              DEPENDENCIES Net RIO Core)
diff --git a/io/hdfs/doc/index.html b/io/hdfs/doc/index.html
deleted file mode 100644
index 00a94f5887cc27b5620266a50f3c60ba4455ce7c..0000000000000000000000000000000000000000
--- a/io/hdfs/doc/index.html
+++ /dev/null
@@ -1,9 +0,0 @@
-BEGIN_HTML
-This directory contains the I/O interface classes for the <b>HDFS</b>
-distributed file system.
-For more information about <b>HDFS</b>, see:
-<ul>
-<li><a href="http://hadoop.apache.org/hdfs/">The HDFS Website</a></li>
-<li><a href="http://wiki.apache.org/hadoop/LibHDFS">The libhdfs Wiki page</a></li>
-</ul>
-END_HTML
diff --git a/io/hdfs/inc/LinkDef.h b/io/hdfs/inc/LinkDef.h
deleted file mode 100644
index c3e14da6f3671ac87714874034a8c27d4a53360e..0000000000000000000000000000000000000000
--- a/io/hdfs/inc/LinkDef.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* @(#)root/hdfs:$Id$ */
-
-/*************************************************************************
- * Copyright (C) 1995-2009, Rene Brun and Fons Rademakers.               *
- * All rights reserved.                                                  *
- *                                                                       *
- * For the licensing terms see $ROOTSYS/LICENSE.                         *
- * For the list of contributors see $ROOTSYS/README/CREDITS.             *
- *************************************************************************/
-
-#ifdef __CINT__
-
-#pragma link off all globals;
-#pragma link off all classes;
-#pragma link off all functions;
-
-#pragma link C++ class THDFSFile;
-#pragma link C++ class THDFSSystem;
-
-#endif
diff --git a/io/hdfs/inc/THDFSFile.h b/io/hdfs/inc/THDFSFile.h
deleted file mode 100644
index c09f4ed3ffd2a9bf5877ca49b466f6a2122e80ee..0000000000000000000000000000000000000000
--- a/io/hdfs/inc/THDFSFile.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// @(#)root/hdfs:$Id$
-// Author: Brian Bockelman 29/09/2009
-
-/*************************************************************************
- * Copyright (C) 1995-2009, Rene Brun and Fons Rademakers.               *
- * All rights reserved.                                                  *
- *                                                                       *
- * For the licensing terms see $ROOTSYS/LICENSE.                         *
- * For the list of contributors see $ROOTSYS/README/CREDITS.             *
- *************************************************************************/
-
-#ifndef ROOT_THDFSFile
-#define ROOT_THDFSFile
-
-#include "TFile.h"
-#include "TSystem.h"
-
-class THDFSFile : public TFile {
-
-private:
-   void     *fHdfsFH;    ///< HDFS file handle
-   void     *fFS;        ///< HDFS user handle
-   Long64_t  fSize;      ///< File size
-   Long64_t  fSysOffset; ///< Seek offset in file
-   TUrl      fUrl;       ///< HDFS url
-   TString   fPath;      ///< HDFS path
-
-   Int_t    SysOpen(const char *pathname, Int_t flags, UInt_t mode);
-   Int_t    SysClose(Int_t fd);
-   Int_t    SysRead(Int_t fd, void *buf, Int_t len);
-   Int_t    SysWrite(Int_t fd, const void *buf, Int_t len);
-   Long64_t SysSeek(Int_t fd, Long64_t offset, Int_t whence);
-   Int_t    SysStat(Int_t fd, Long_t *id, Long64_t *size, Long_t *flags, Long_t *modtime);
-   Int_t    SysSync(Int_t fd);
-
-public:
-   THDFSFile(const char *path, Option_t *option="",
-             const char *ftitle="", Int_t compress = ROOT::RCompressionSetting::EDefaults::kUseGeneralPurpose);
-   virtual ~THDFSFile();
-
-   void ResetErrno() const;
-
-   ClassDef(THDFSFile, 0) //A ROOT file that reads/writes via HDFS
-};
-
-
-
-class THDFSSystem : public TSystem {
-
-private:
-   void  *fFH;           ///< HDFS filesystem handle.
-   void  *fDirp;         ///< Pointer to the array of file information.
-   TUrl  *fUrlp;         ///< Pointer to the array of directory content URLs.
-   Int_t  fDirEntries;   ///< The number of entries in the fDirp array.
-   Int_t  fDirCtr;       ///< The current position in the fDirp array.
-
-public:
-    THDFSSystem();
-    virtual ~THDFSSystem() { }
-
-    Int_t       MakeDirectory(const char *name);
-    void       *OpenDirectory(const char *name);
-    void        FreeDirectory(void *dirp);
-    const char *GetDirEntry(void *dirp);
-    Int_t       GetPathInfo(const char *path, FileStat_t &buf);
-    Bool_t      AccessPathName(const char *path, EAccessMode mode);
-    Int_t       Unlink(const char *path);
-
-    ClassDef(THDFSSystem,0)   // Directory handler for HDFS (THDFSFile)
-};
-
-#endif
diff --git a/io/hdfs/src/THDFSFile.cxx b/io/hdfs/src/THDFSFile.cxx
deleted file mode 100644
index 9707c9f3669e42712864934fb70678cff94f62ca..0000000000000000000000000000000000000000
--- a/io/hdfs/src/THDFSFile.cxx
+++ /dev/null
@@ -1,552 +0,0 @@
-// @(#)root/hdfs:$Id$
-// Author: Brian Bockelman 29/09/2009
-
-/*************************************************************************
- * Copyright (C) 1995-2002, Rene Brun and Fons Rademakers.               *
- * All rights reserved.                                                  *
- *                                                                       *
- * For the licensing terms see $ROOTSYS/LICENSE.                         *
- * For the list of contributors see $ROOTSYS/README/CREDITS.             *
- *************************************************************************/
-
-/**
-\class THDFSFile
-\ingroup IO
-
-Reads and writes its data via the HDFS protocols
-
-A THDFSFile is like a normal TFile except that it reads and writes
-its data via the HDFS protocols. For more information on HDFS, see
-http://hadoop.apache.org/hdfs/.
-This implementation interfaces with libhdfs, which is a JNI-based
-library (i.e., it will start a Java JVM internally the first time
-it is called). At a minimum, you will need your environment's
-$CLASSPATH variable set up properly to use. Here's an example of
-one way to properly set your classpath, assuming you use the OSG
-distribution of Hadoop:
-    $ source $HADOOP_CONF_DIR/hadoop-env.sh
-    $ export CLASSPATH=$HADOOP_CLASSPATH
-Additionally, you will need a valid libjvm in your $LD_LIBRARY_PATH
-This is usually found in either:
-    $JAVA_HOME/jre/lib/i386/server
-or
-    $JAVA_HOME/jre/lib/amd64/server
-This file can only be used if hdfs support is compiled into ROOT.
-The HDFS URLs follow the Hadoop notation and should be of the form:
-    hdfs://[host:port]/absolute/path/to/file/in/HDFS.root
-Any host or port information will be ignored; this is taken from the
-node's HDFS configuration files.
-
-Example HDFS URLs:
-
-    hdfs:///user/username/dir1/file2.root
-    hdfs://localhost/user/username/dir1/file2.root
-*/
-
-#include "syslog.h"
-#include "assert.h"
-#include "stdlib.h"
-
-#include "THDFSFile.h"
-#include "TError.h"
-#include "TSystem.h"
-#include "TROOT.h"
-
-#include "hdfs.h"
-//#include "hdfsJniHelper.h"
-
-// For now, we don't allow any write/fs modification operations.
-static const Bool_t R__HDFS_ALLOW_CHANGES = kFALSE;
-
-static const char hdfs_default_host[] = "default";
-static const int hdfs_default_port = 0;
-
-// The following snippet is used for developer-level debugging
-// Contributed by Pete Wyckoff of the HDFS project
-#define THDFSFile_TRACE
-#ifndef THDFSFile_TRACE
-#define TRACE(x) \
-  Debug("THDFSFile", "%s", x);
-#else
-#define TRACE(x);
-#endif
-
-ClassImp(THDFSFile);
-
-////////////////////////////////////////////////////////////////////////////////
-/// Usual Constructor.  See the TFile constructor for details.
-
-THDFSFile::THDFSFile(const char *path, Option_t *option,
-                     const char *ftitle, Int_t compress):
-   TFile(path, "WEB", ftitle, compress)
-{
-   fHdfsFH    = 0;
-   fFS        = 0;
-   fSize      = -1;
-   fSysOffset = 0;
-
-   fOption = option;
-   fOption.ToUpper();
-   Bool_t create   = (fOption == "CREATE") ? kTRUE : kFALSE;
-   Bool_t recreate = (fOption == "RECREATE") ? kTRUE : kFALSE;
-   Bool_t update   = (fOption == "UPDATE") ? kTRUE : kFALSE;
-   Bool_t read     = (fOption == "READ") ? kTRUE : kFALSE;
-   if (!create && !recreate && !update && !read) {
-      read    = kTRUE;
-      fOption = "READ";
-   }
-
-   Bool_t has_authn = kTRUE;
-
-   struct hdfsBuilder *bld = hdfsNewBuilder();
-   if (!bld) {
-      SysError("THDFSFile", "Error creating hdfs builder");
-      goto zombie;
-   }
-
-   hdfsBuilderSetNameNode(bld, hdfs_default_host);
-   hdfsBuilderSetNameNodePort(bld, hdfs_default_port);
-   if (has_authn) {
-      UserGroup_t *ugi = gSystem->GetUserInfo((char *)0);
-      const char *user = (ugi->fUser).Data();
-      hdfsBuilderSetUserName(bld, user);
-      delete ugi;
-   }
-
-   fFS = hdfsBuilderConnect(bld);
-
-   if (fFS == 0) {
-      SysError("THDFSFile", "HDFS client for %s cannot open the filesystem",
-               path);
-      goto zombie;
-   }
-
-   if (create || update || recreate) {
-      Int_t mode = O_RDWR | O_CREAT;
-      if (recreate) mode |= O_TRUNC;
-
-#ifndef WIN32
-      fD = SysOpen(path, mode, 0644);
-#else
-      fD = SysOpen(path, mode | O_BINARY, S_IREAD | S_IWRITE);
-#endif
-      if (fD == -1) {
-         SysError("THDFSFile", "file %s can not be opened", path);
-         goto zombie;
-      }
-      fWritable = kTRUE;
-   } else {
-#ifndef WIN32
-      fD = SysOpen(path, O_RDONLY, 0644);
-#else
-      fD = SysOpen(path, O_RDONLY | O_BINARY, S_IREAD | S_IWRITE);
-#endif
-      if (fD == -1) {
-         SysError("THDFSFile", "file %s can not be opened for reading", path);
-         goto zombie;
-      }
-      fWritable = kFALSE;
-   }
-
-   Init(create || recreate);
-
-   return;
-
-zombie:
-   // Error in opening file; make this a zombie
-   MakeZombie();
-   gDirectory = gROOT;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Close and clean-up HDFS file.
-
-THDFSFile::~THDFSFile()
-{
-   TRACE("destroy")
-
-   // We assume that the file is closed in SysClose
-   // Explicitly release reference to HDFS filesystem object.
-   // Turned off now due to compilation issues.
-   // The very awkward way of releasing HDFS FS objects (by accessing JNI
-   // internals) is going away in the next libhdfs version.
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Read specified number of bytes from current offset into the buffer.
-/// See documentation for TFile::SysRead().
-
-Int_t THDFSFile::SysRead(Int_t, void *buf, Int_t len)
-{
-   TRACE("READ")
-   tSize num_read_total = 0;
-
-   do {
-      tSize num_read = hdfsRead((hdfsFS)fFS, (hdfsFile)fHdfsFH, (char *)buf + num_read_total, len - num_read_total);
-      num_read_total += num_read;
-      if (num_read < 0) {
-         gSystem->SetErrorStr(strerror(errno));
-         break;
-      } else if (num_read == 0) {
-         break;
-      }
-   } while (num_read_total < len);
-
-   fSysOffset += num_read_total;
-   return num_read_total;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Seek to a specified position in the file.  See TFile::SysSeek().
-/// Note that THDFSFile does not support seeks when the file is open for write.
-
-Long64_t THDFSFile::SysSeek(Int_t, Long64_t offset, Int_t whence)
-{
-   TRACE("SEEK")
-   if (whence == SEEK_SET)
-      fSysOffset = offset;
-   else if (whence == SEEK_CUR)
-      fSysOffset += offset;
-   else if (whence == SEEK_END) {
-      if (offset > 0) {
-         SysError("THDFSFile", "Unable to seek past end of file");
-         return -1;
-      }
-      if (fSize == -1) {
-         hdfsFileInfo *info = hdfsGetPathInfo((hdfsFS)fFS, fPath);
-         if (info != 0) {
-            fSize = info->mSize;
-            free(info);
-         } else {
-            SysError("THDFSFile", "Unable to seek to end of file");
-            return -1;
-         }
-      }
-      fSysOffset = fSize;
-   } else {
-      SysError("THDFSFile", "Unknown whence!");
-      return -1;
-   }
-
-   if (hdfsSeek((hdfsFS)fFS, (hdfsFile)fHdfsFH, fSysOffset) != 0) {
-      SysError("THDFSFile", "Unable to seek to the given position");
-      return -1;
-   }
-
-   return fSysOffset;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Open a file in HDFS.
-
-Int_t THDFSFile::SysOpen(const char * pathname, Int_t flags, UInt_t)
-{
-   // This is given to us as a URL in Hadoop notation (hdfs://hadoop-name:9000/user/foo/bar or
-   // hdfs:///user/foo/bar); convert this to a file name.
-   fUrl = TUrl(pathname);
-
-   fPath = fUrl.GetFileAndOptions();
-   if (!fPath.BeginsWith("/")) {
-      fPath.Insert(0, '/');
-   }
-
-   if ((fHdfsFH = hdfsOpenFile((hdfsFS)fFS, fPath, flags, 0, 0, 0)) == 0) {
-      SysError("THDFSFile", "Unable to open file %s in HDFS", pathname);
-      return -1;
-   }
-   return 1;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Close the file in HDFS.
-
-Int_t THDFSFile::SysClose(Int_t)
-{
-   int result = hdfsCloseFile((hdfsFS)fFS, (hdfsFile)fHdfsFH);
-   fFS = 0;
-   fHdfsFH = 0;
-   return result;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Write a buffer into the file; this is not supported currently.
-
-Int_t THDFSFile::SysWrite(Int_t, const void *, Int_t)
-{
-   errno = ENOSYS;
-   return -1;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Perform a stat on the HDFS file; see TFile::SysStat().
-
-Int_t THDFSFile::SysStat(Int_t, Long_t* id, Long64_t* size, Long_t* flags, Long_t* modtime)
-{
-   *id = ::Hash(fPath);
-
-   hdfsFileInfo *info = hdfsGetPathInfo((hdfsFS)fFS, fPath);
-   if (info != 0) {
-      fSize = info->mSize;
-      *size = fSize;
-      if (info->mKind == kObjectKindFile)
-         *flags = 0;
-      else if (info->mKind == kObjectKindDirectory)
-         *flags = 1;
-      *modtime = info->mLastMod;
-      free(info);
-   } else {
-      return 1;
-   }
-
-   return 0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Sync remaining data to disk; Not supported by HDFS.
-
-Int_t THDFSFile::SysSync(Int_t)
-{
-   errno = ENOSYS;
-   return -1;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// ResetErrno; simply calls TSystem::ResetErrno().
-
-void THDFSFile::ResetErrno() const
-{
-   TSystem::ResetErrno();
-}
-
-
-/**
-\class THDFSSystem
-\ingroup IO
-
-Directory handler for HDFS (THDFSFile).
-*/
-
-
-ClassImp(THDFSSystem);
-
-////////////////////////////////////////////////////////////////////////////////
-
-THDFSSystem::THDFSSystem() : TSystem("-hdfs", "HDFS Helper System")
-{
-   SetName("hdfs");
-
-   Bool_t has_authn = kTRUE;
-
-   struct hdfsBuilder *bld = hdfsNewBuilder();
-   if (!bld) {
-      SysError("THDFSSystem", "Error creating hdfs builder");
-      goto zombie;
-   }
-
-   hdfsBuilderSetNameNode(bld, hdfs_default_host);
-   hdfsBuilderSetNameNodePort(bld, hdfs_default_port);
-   if (has_authn) {
-      UserGroup_t *ugi = gSystem->GetUserInfo((char *)0);
-      const char *user = (ugi->fUser).Data();
-      hdfsBuilderSetUserName(bld, user);
-      delete ugi;
-   }
-
-   fFH = hdfsBuilderConnect(bld);
-
-   if (fFH == 0) {
-      SysError("THDFSSystem", "HDFS client cannot open the filesystem");
-      goto zombie;
-   }
-
-   fDirp = 0;
-
-   return;
-
-zombie:
-   // Error in opening file; make this a zombie
-   MakeZombie();
-   gDirectory = gROOT;
-
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Make a directory.
-
-Int_t THDFSSystem::MakeDirectory(const char * path)
-{
-   if (fFH != 0) {
-      Error("MakeDirectory", "No filesystem handle (should never happen)");
-      return -1;
-   }
-   TUrl url(path);
-
-   if (R__HDFS_ALLOW_CHANGES == kTRUE) {
-      return hdfsCreateDirectory((hdfsFS)fFH, url.GetFileAndOptions());
-   } else {
-      return -1;
-   }
-
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Open a directory via hdfs. Returns an opaque pointer to a dir
-/// structure. Returns 0 in case of error.
-
-void *THDFSSystem::OpenDirectory(const char * path)
-{
-   if (fFH == 0) {
-       Error("OpenDirectory", "No filesystem handle (should never happen)");
-       return 0;
-   }
-   TUrl url(path);
-   fDirp = 0;
-/*
-   if (fDirp) {
-      Error("OpenDirectory", "invalid directory pointer (should never happen)");
-      fDirp = 0;
-   }
-*/
-
-   hdfsFileInfo * dir = 0;
-   if ((dir = hdfsGetPathInfo((hdfsFS)fFH, url.GetFileAndOptions())) == 0) {
-      return 0;
-   }
-   if (dir->mKind != kObjectKindDirectory) {
-      return 0;
-   }
-
-   fDirp = (void *)hdfsListDirectory((hdfsFS)fFH, url.GetFileAndOptions(), &fDirEntries);
-   fDirCtr = 0;
-
-   fUrlp = new TUrl[fDirEntries];
-
-   return fDirp;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-void THDFSSystem::FreeDirectory(void *dirp)
-{
-   if (fFH == 0) {
-      Error("FreeDirectory", "No filesystem handle (should never happen)");
-      return;
-   }
-   if (dirp != fDirp) {
-      Error("FreeDirectory", "invalid directory pointer (should never happen)");
-      return;
-   }
-   if (fUrlp != 0) {
-      delete[] fUrlp;
-   }
-
-   hdfsFreeFileInfo((hdfsFileInfo *)fDirp, fDirEntries);
-   fDirp=0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-const char *THDFSSystem::GetDirEntry(void *dirp)
-{
-   if (fFH == 0) {
-      Error("GetDirEntry", "No filesystem handle (should never happen)");
-      return 0;
-   }
-   if (dirp != fDirp) {
-      Error("GetDirEntry", "invalid directory pointer (should never happen)");
-      return 0;
-   }
-   if (dirp == 0) {
-      Error("GetDirEntry", "Passed an invalid directory pointer.");
-      return 0;
-   }
-
-   if (fDirCtr == fDirEntries-1) {
-      return 0;
-   }
-
-   hdfsFileInfo *fileInfo = ((hdfsFileInfo *)dirp) + fDirCtr;
-   fUrlp[fDirCtr].SetUrl(fileInfo->mName);
-   const char * result = fUrlp[fDirCtr].GetFile();
-   TUrl tempUrl;
-   tempUrl.SetUrl("hdfs:///");
-   tempUrl.SetFile(result);
-   fUrlp[fDirCtr].SetUrl(tempUrl.GetUrl());
-   result = fUrlp[fDirCtr].GetUrl();
-   fDirCtr++;
-
-   return result;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Get info about a file. Info is returned in the form of a FileStat_t
-/// structure (see TSystem.h).
-/// The function returns 0 in case of success and 1 if the file could
-/// not be stat'ed.
-
-Int_t THDFSSystem::GetPathInfo(const char *path, FileStat_t &buf)
-{
-   if (fFH == 0) {
-      Error("GetPathInfo", "No filesystem handle (should never happen)");
-      return 1;
-   }
-
-   TUrl url(path);
-
-   hdfsFileInfo *fileInfo = hdfsGetPathInfo((hdfsFS)fFH, url.GetFileAndOptions());
-
-   if (fileInfo == 0)
-      return 1;
-
-   buf.fDev    = 0;
-   buf.fIno    = 0;
-   buf.fMode   = fileInfo->mPermissions;
-   buf.fUid    = gSystem->GetUid(fileInfo->mOwner);
-   buf.fGid    = gSystem->GetGid(fileInfo->mGroup);
-   buf.fSize   = fileInfo->mSize;
-   buf.fMtime  = fileInfo->mLastAccess;
-   buf.fIsLink = kFALSE;
-
-   return 0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Returns FALSE if one can access a file using the specified access mode.
-/// Mode is the same as for the Unix access(2) function.
-/// Attention, bizarre convention of return value!!
-
-Bool_t THDFSSystem::AccessPathName(const char *path, EAccessMode mode)
-{
-   if (mode & kExecutePermission || mode & kWritePermission)
-       return kTRUE;
-
-   if (fFH == 0) {
-      Error("AccessPathName", "No filesystem handle (should never happen)");
-      return kTRUE;
-   }
-
-   TUrl url(path);
-
-   if (hdfsExists((hdfsFS)fFH, url.GetFileAndOptions()) == 0)
-      return kFALSE;
-   else
-      return kTRUE;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-/// Unlink, i.e. remove, a file or directory. Returns 0 when successful,
-/// -1 in case of failure.
-
-Int_t THDFSSystem::Unlink(const char * path)
-{
-   if (fFH == 0) {
-      Error("Unlink", "No filesystem handle (should never happen)");
-      return kTRUE;
-   }
-
-   if (R__HDFS_ALLOW_CHANGES == kTRUE) {
-      return hdfsDelete((hdfsFS)fFH, path, 1);
-   } else {
-      return -1;
-   }
-}
diff --git a/io/io/src/TMemFile.cxx b/io/io/src/TMemFile.cxx
index 0ccf0ce60a1d5f238f7afbaef5d30d259731bb25..ac1ce8420605f419aa1561708d22c664e7b37032 100644
--- a/io/io/src/TMemFile.cxx
+++ b/io/io/src/TMemFile.cxx
@@ -643,7 +643,7 @@ Int_t TMemFile::SysWrite(Int_t fd, const void *buf, Int_t len)
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-/// Perform a stat on the HDFS file; see TFile::SysStat().
+/// Perform a stat on the file; see TFile::SysStat().
 
 Int_t TMemFile::SysStat(Int_t, Long_t* /* id */, Long64_t* /* size */, Long_t* /* flags */, Long_t* /* modtime */)
 {