001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.fs;
019    
020    
021    import java.io.FileNotFoundException;
022    import java.io.IOException;
023    import java.lang.reflect.Constructor;
024    import java.net.URI;
025    import java.net.URISyntaxException;
026    import java.util.ArrayList;
027    import java.util.EnumSet;
028    import java.util.HashMap;
029    import java.util.List;
030    import java.util.Map;
031    import java.util.NoSuchElementException;
032    import java.util.StringTokenizer;
033    import java.util.concurrent.ConcurrentHashMap;
034    
035    import org.apache.commons.logging.Log;
036    import org.apache.commons.logging.LogFactory;
037    import org.apache.hadoop.HadoopIllegalArgumentException;
038    import org.apache.hadoop.classification.InterfaceAudience;
039    import org.apache.hadoop.classification.InterfaceStability;
040    import org.apache.hadoop.conf.Configuration;
041    import org.apache.hadoop.fs.FileSystem.Statistics;
042    import org.apache.hadoop.fs.Options.ChecksumOpt;
043    import org.apache.hadoop.fs.Options.CreateOpts;
044    import org.apache.hadoop.fs.Options.Rename;
045    import org.apache.hadoop.fs.permission.FsPermission;
046    import org.apache.hadoop.fs.InvalidPathException;
047    import org.apache.hadoop.security.AccessControlException;
048    import org.apache.hadoop.security.SecurityUtil;
049    import org.apache.hadoop.security.token.Token;
050    import org.apache.hadoop.util.Progressable;
051    
052    /**
053     * This class provides an interface for implementors of a Hadoop file system
054     * (analogous to the VFS of Unix). Applications do not access this class;
055     * instead they access files across all file systems using {@link FileContext}.
056     * 
 * Pathnames passed to AbstractFileSystem can be fully-qualified URIs that
 * match this file system (i.e. same scheme and authority), or slash-relative
 * names that are assumed to be relative to the root of this file system.
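 * <p>
 * For example, an instance bound to {@code hdfs://namenode.example.com:8020}
 * (the host name here is illustrative) accepts either form of the same path:
 * <pre>
 *   hdfs://namenode.example.com:8020/user/alice/data   (fully qualified)
 *   /user/alice/data                                    (slash-relative)
 * </pre>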
061     */
062    @InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
064    public abstract class AbstractFileSystem {
065      static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
066    
  /** Statistics recorded per file system, keyed by scheme and authority. */
068      private static final Map<URI, Statistics> 
069          STATISTICS_TABLE = new HashMap<URI, Statistics>();
070      
071      /** Cache of constructors for each file system class. */
072      private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
073        new ConcurrentHashMap<Class<?>, Constructor<?>>();
074      
075      private static final Class<?>[] URI_CONFIG_ARGS = 
076        new Class[]{URI.class, Configuration.class};
077      
078      /** The statistics for this file system. */
079      protected Statistics statistics;
080      
081      private final URI myUri;
082      
083      public Statistics getStatistics() {
084        return statistics;
085      }
086      
087      /**
088       * Returns true if the specified string is considered valid in the path part
   * of a URI by this file system. The default implementation enforces the
   * rules of HDFS, but subclasses may override this method to implement
   * validation rules specific to their file systems.
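   * <p>
   * For example, under the default rules (the paths are illustrative):
   * <pre>
   *   isValidName("/user/alice/data")  returns true
   *   isValidName("/user/../secret")   returns false (contains "..")
   *   isValidName("/data/a:b")         returns false (contains ":")
   * </pre>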
092       * 
093       * @param src String source filename to check, path part of the URI
094       * @return boolean true if the specified string is considered valid
095       */
096      public boolean isValidName(String src) {
097        // Prohibit ".." "." and anything containing ":"
098        StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
099        while(tokens.hasMoreTokens()) {
100          String element = tokens.nextToken();
101          if (element.equals("..") ||
102              element.equals(".")  ||
103              (element.indexOf(":") >= 0)) {
104            return false;
105          }
106        }
107        return true;
108      }
109      
110      /** 
111       * Create an object for the given class and initialize it from conf.
   * @param theClass class of which an object is created
   * @param uri URI of the file system
   * @param conf Configuration
114       * @return a new object
115       */
116      @SuppressWarnings("unchecked")
117      static <T> T newInstance(Class<T> theClass,
118        URI uri, Configuration conf) {
119        T result;
120        try {
121          Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
122          if (meth == null) {
123            meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
124            meth.setAccessible(true);
125            CONSTRUCTOR_CACHE.put(theClass, meth);
126          }
127          result = meth.newInstance(uri, conf);
128        } catch (Exception e) {
129          throw new RuntimeException(e);
130        }
131        return result;
132      }
133      
134      /**
135       * Create a file system instance for the specified uri using the conf. The
136       * conf is used to find the class name that implements the file system. The
137       * conf is also passed to the file system for its configuration.
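   * <p>
   * A minimal sketch of the lookup this method performs (the class name shown
   * is the binding HDFS normally provides; host and port are illustrative):
   * <pre>{@code
   * Configuration conf = new Configuration();
   * conf.set("fs.AbstractFileSystem.hdfs.impl", "org.apache.hadoop.fs.Hdfs");
   * AbstractFileSystem fs = AbstractFileSystem.createFileSystem(
   *     URI.create("hdfs://namenode.example.com:8020"), conf);
   * }</pre>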
138       *
139       * @param uri URI of the file system
140       * @param conf Configuration for the file system
141       * 
142       * @return Returns the file system for the given URI
143       *
144       * @throws UnsupportedFileSystemException file system for <code>uri</code> is
145       *           not found
146       */
147      public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
148          throws UnsupportedFileSystemException {
149        Class<?> clazz = conf.getClass("fs.AbstractFileSystem." + 
150                                    uri.getScheme() + ".impl", null);
151        if (clazz == null) {
152          throw new UnsupportedFileSystemException(
153              "No AbstractFileSystem for scheme: " + uri.getScheme());
154        }
155        return (AbstractFileSystem) newInstance(clazz, uri, conf);
156      }
157    
158      /**
159       * Get the statistics for a particular file system.
160       * 
161       * @param uri
   *          used as a key to look up STATISTICS_TABLE. Only the scheme and
   *          authority parts of the uri are used.
164       * @return a statistics object
165       */
166      protected static synchronized Statistics getStatistics(URI uri) {
167        String scheme = uri.getScheme();
168        if (scheme == null) {
169          throw new IllegalArgumentException("Scheme not defined in the uri: "
170              + uri);
171        }
172        URI baseUri = getBaseUri(uri);
173        Statistics result = STATISTICS_TABLE.get(baseUri);
174        if (result == null) {
175          result = new Statistics(scheme);
176          STATISTICS_TABLE.put(baseUri, result);
177        }
178        return result;
179      }
180      
181      private static URI getBaseUri(URI uri) {
182        String scheme = uri.getScheme();
183        String authority = uri.getAuthority();
184        String baseUriString = scheme + "://";
185        if (authority != null) {
186          baseUriString = baseUriString + authority;
187        } else {
188          baseUriString = baseUriString + "/";
189        }
190        return URI.create(baseUriString);
191      }
192      
193      public static synchronized void clearStatistics() {
194        for(Statistics stat: STATISTICS_TABLE.values()) {
195          stat.reset();
196        }
197      }
198    
199      /**
200       * Prints statistics for all file systems.
201       */
202      public static synchronized void printStatistics() {
203        for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
204          System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
205              + pair.getKey().getAuthority() + ": " + pair.getValue());
206        }
207      }
208      
209      protected static synchronized Map<URI, Statistics> getAllStatistics() {
210        Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
211            STATISTICS_TABLE.size());
212        for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
213          URI key = pair.getKey();
214          Statistics value = pair.getValue();
215          Statistics newStatsObj = new Statistics(value);
216          statsMap.put(URI.create(key.toString()), newStatsObj);
217        }
218        return statsMap;
219      }
220    
221      /**
222       * The main factory method for creating a file system. Get a file system for
223       * the URI's scheme and authority. The scheme of the <code>uri</code>
224       * determines a configuration property name,
225       * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
226       * AbstractFileSystem class.
227       * 
   * The entire URI and conf are passed to the AbstractFileSystem factory method.
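   * <p>
   * Typical use (the URI is illustrative); the configuration must carry the
   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> binding described above:
   * <pre>{@code
   * AbstractFileSystem fs = AbstractFileSystem.get(
   *     URI.create("hdfs://namenode.example.com:8020"), new Configuration());
   * }</pre>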
229       * 
230       * @param uri for the file system to be created.
231       * @param conf which is passed to the file system impl.
232       * 
233       * @return file system for the given URI.
234       * 
235       * @throws UnsupportedFileSystemException if the file system for
236       *           <code>uri</code> is not supported.
237       */
238      public static AbstractFileSystem get(final URI uri, final Configuration conf)
239          throws UnsupportedFileSystemException {
240        return createFileSystem(uri, conf);
241      }
242    
243      /**
244       * Constructor to be called by subclasses.
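   * <p>
   * A subclass typically chains to this constructor; a minimal sketch (the
   * scheme, default port and class name are hypothetical):
   * <pre>{@code
   * public class FooFs extends AbstractFileSystem {
   *   FooFs(final URI uri, final Configuration conf) throws URISyntaxException {
   *     // "foo" scheme, authority required, default port 9999
   *     super(uri, "foo", true, 9999);
   *   }
   *   // ... abstract methods implemented here
   * }
   * }</pre>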
245       * 
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI must have a null authority.
   * @param defaultPort default port to use if the port is not specified in
   *          the URI (must be non-negative when authorityNeeded is true).
250       *
251       * @throws URISyntaxException <code>uri</code> has syntax error
252       */
253      public AbstractFileSystem(final URI uri, final String supportedScheme,
254          final boolean authorityNeeded, final int defaultPort)
255          throws URISyntaxException {
256        myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
257        statistics = getStatistics(uri); 
258      }
259      
260      /**
   * Check that the URI's scheme matches the supported scheme.
   * @param uri the URI to check
   * @param supportedScheme the scheme this file system supports
264       */
265      public void checkScheme(URI uri, String supportedScheme) {
266        String scheme = uri.getScheme();
267        if (scheme == null) {
268          throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
269        }
270        if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme of " + uri
          + " does not match the supported scheme " + supportedScheme);
273        }
274      }
275    
276      /**
   * Get the URI for the file system based on the given URI. The path and query
   * parts of the given URI are stripped out, and the default file system port
   * is used to form the URI.
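   * <p>
   * For example (host and ports illustrative): with a default port of 8020,
   * {@code hdfs://host/some/path?query} is normalized to
   * {@code hdfs://host:8020}, and {@code hdfs://host:9000/some/path} to
   * {@code hdfs://host:9000}.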
280       * 
281       * @param uri FileSystem URI.
   * @param supportedScheme the scheme this file system supports.
   * @param authorityNeeded if true, the authority cannot be null in the URI;
   *          if false, the authority must be null.
284       * @param defaultPort default port to use if port is not specified in the URI.
285       * 
286       * @return URI of the file system
287       * 
288       * @throws URISyntaxException <code>uri</code> has syntax error
289       */
290      private URI getUri(URI uri, String supportedScheme,
291          boolean authorityNeeded, int defaultPort) throws URISyntaxException {
292        checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify a default port
295        if (defaultPort < 0 && authorityNeeded) {
296          throw new HadoopIllegalArgumentException(
297              "FileSystem implementation error -  default port " + defaultPort
298                  + " is not valid");
299        }
300        String authority = uri.getAuthority();
301        if (authority == null) {
302           if (authorityNeeded) {
303             throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
304           } else {
305             return new URI(supportedScheme + ":///");
306           }   
307        }
    // authority is non-null - authorityNeeded may be true or false.
309        int port = uri.getPort();
310        port = (port == -1 ? defaultPort : port);
311        if (port == -1) { // no port supplied and default port is not specified
312          return new URI(supportedScheme, authority, "/", null);
313        }
314        return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
315      }
316      
317      /**
318       * The default port of this file system.
319       * 
   * @return the default port of this file system's URI scheme; a URI with a
   *         port of -1 denotes the default port.
322       */
323      public abstract int getUriDefaultPort();
324    
325      /**
326       * Returns a URI whose scheme and authority identify this FileSystem.
327       * 
328       * @return the uri of this file system.
329       */
330      public URI getUri() {
331        return myUri;
332      }
333      
334      /**
335       * Check that a Path belongs to this FileSystem.
336       * 
   * If the path is a fully-qualified URI, its scheme and authority must match
   * those of this file system. Otherwise the path must be a slash-relative
   * name.
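   * <p>
   * For example, for an instance bound to {@code hdfs://nn.example.com:8020}
   * (illustrative), {@code /user/alice} and
   * {@code hdfs://nn.example.com:8020/user/alice} both pass, while
   * {@code file:///user/alice} (wrong scheme) and {@code user/alice}
   * (relative) throw {@link InvalidPathException}.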
340       * 
341       * @throws InvalidPathException if the path is invalid
342       */
343      public void checkPath(Path path) {
344        URI uri = path.toUri();
345        String thatScheme = uri.getScheme();
346        String thatAuthority = uri.getAuthority();
347        if (thatScheme == null) {
348          if (thatAuthority == null) {
349            if (path.isUriPathAbsolute()) {
350              return;
351            }
352            throw new InvalidPathException("relative paths not allowed:" + 
353                path);
354          } else {
355            throw new InvalidPathException(
356                "Path without scheme with non-null authority:" + path);
357          }
358        }
359        String thisScheme = this.getUri().getScheme();
360        String thisHost = this.getUri().getHost();
361        String thatHost = uri.getHost();
362        
363        // Schemes and hosts must match.
364        // Allow for null Authority for file:///
365        if (!thisScheme.equalsIgnoreCase(thatScheme) ||
366           (thisHost != null && 
367                !thisHost.equalsIgnoreCase(thatHost)) ||
368           (thisHost == null && thatHost != null)) {
369          throw new InvalidPathException("Wrong FS: " + path + ", expected: "
370              + this.getUri());
371        }
372        
373        // Ports must match, unless this FS instance is using the default port, in
374        // which case the port may be omitted from the given URI
375        int thisPort = this.getUri().getPort();
376        int thatPort = uri.getPort();
377        if (thatPort == -1) { // -1 => defaultPort of Uri scheme
378          thatPort = this.getUriDefaultPort();
379        }
380        if (thisPort != thatPort) {
381          throw new InvalidPathException("Wrong FS: " + path + ", expected: "
382              + this.getUri());
383        }
384      }
385      
386      /**
387       * Get the path-part of a pathname. Checks that URI matches this file system
388       * and that the path-part is a valid name.
389       * 
390       * @param p path
391       * 
392       * @return path-part of the Path p
393       */
394      public String getUriPath(final Path p) {
395        checkPath(p);
396        String s = p.toUri().getPath();
397        if (!isValidName(s)) {
398          throw new InvalidPathException("Path part " + s + " from URI " + p
399              + " is not a valid filename.");
400        }
401        return s;
402      }
403      
404      /**
   * Make the path fully qualified relative to this file system.
   * @param path the path to qualify
407       * @return the qualified path
408       */
409      public Path makeQualified(Path path) {
410        checkPath(path);
411        return path.makeQualified(this.getUri(), null);
412      }
413      
414      /**
   * Some file systems, like LocalFileSystem, have an initial workingDir
   * that is used as the starting workingDir. For other file systems, like
   * HDFS, there is no built-in notion of an initial workingDir.
   * 
   * @return the initial workingDir if the file system has such a notion,
   *         otherwise null.
421       */
422      public Path getInitialWorkingDirectory() {
423        return null;
424      }
425      
426      /** 
427       * Return the current user's home directory in this file system.
   * The default implementation returns {@code "/user/$USER"}.
429       * 
430       * @return current user's home directory.
431       */
432      public Path getHomeDirectory() {
433        return new Path("/user/"+System.getProperty("user.name")).makeQualified(
434                                                                    getUri(), null);
435      }
436      
437      /**
438       * Return a set of server default configuration values.
439       * 
440       * @return server default configuration values
441       * 
442       * @throws IOException an I/O error occurred
443       */
444      public abstract FsServerDefaults getServerDefaults() throws IOException; 
445    
446      /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException if the path does not exist
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   * @throws IOException for any other I/O error
454       */
455       public Path resolvePath(final Path p) throws FileNotFoundException,
456               UnresolvedLinkException, AccessControlException, IOException {
457         checkPath(p);
458         return getFileStatus(p).getPath(); // default impl is to return the path
459       }
460      
461      /**
462       * The specification of this method matches that of
463       * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
464       * that the Path f must be fully qualified and the permission is absolute
465       * (i.e. umask has been applied).
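   * <p>
   * A minimal sketch of a call (paths and values are illustrative); note that
   * a {@code CreateOpts.Perms} option must always be supplied:
   * <pre>{@code
   * FSDataOutputStream out = fs.create(
   *     new Path("hdfs://nn.example.com:8020/user/alice/part-0"),
   *     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *     CreateOpts.perms(new FsPermission((short) 0644)),
   *     CreateOpts.createParent());
   * }</pre>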
466       */
467      public final FSDataOutputStream create(final Path f,
468          final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
469          throws AccessControlException, FileAlreadyExistsException,
470          FileNotFoundException, ParentNotDirectoryException,
471          UnsupportedFileSystemException, UnresolvedLinkException, IOException {
472        checkPath(f);
473        int bufferSize = -1;
474        short replication = -1;
475        long blockSize = -1;
476        int bytesPerChecksum = -1;
477        ChecksumOpt checksumOpt = null;
478        FsPermission permission = null;
479        Progressable progress = null;
480        Boolean createParent = null;
481     
482        for (CreateOpts iOpt : opts) {
483          if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
484            if (blockSize != -1) {
485              throw new HadoopIllegalArgumentException(
486                  "BlockSize option is set multiple times");
487            }
488            blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
489          } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
490            if (bufferSize != -1) {
491              throw new HadoopIllegalArgumentException(
492                  "BufferSize option is set multiple times");
493            }
494            bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
495          } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
496            if (replication != -1) {
497              throw new HadoopIllegalArgumentException(
498                  "ReplicationFactor option is set multiple times");
499            }
500            replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
501          } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
502            if (bytesPerChecksum != -1) {
503              throw new HadoopIllegalArgumentException(
504                  "BytesPerChecksum option is set multiple times");
505            }
506            bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
507          } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
508            if (checksumOpt != null) {
509              throw new  HadoopIllegalArgumentException(
510                  "CreateChecksumType option is set multiple times");
511            }
512            checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
513          } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
514            if (permission != null) {
515              throw new HadoopIllegalArgumentException(
516                  "Perms option is set multiple times");
517            }
518            permission = ((CreateOpts.Perms) iOpt).getValue();
519          } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
520            if (progress != null) {
521              throw new HadoopIllegalArgumentException(
522                  "Progress option is set multiple times");
523            }
524            progress = ((CreateOpts.Progress) iOpt).getValue();
525          } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
526            if (createParent != null) {
527              throw new HadoopIllegalArgumentException(
528                  "CreateParent option is set multiple times");
529            }
530            createParent = ((CreateOpts.CreateParent) iOpt).getValue();
531          } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
533                iOpt.getClass().getName());
534          }
535        }
536        if (permission == null) {
537          throw new HadoopIllegalArgumentException("no permission supplied");
538        }
539    
540    
541        FsServerDefaults ssDef = getServerDefaults();
542        if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
543          throw new IOException("Internal error: default blockSize is" + 
544              " not a multiple of default bytesPerChecksum ");
545        }
546        
547        if (blockSize == -1) {
548          blockSize = ssDef.getBlockSize();
549        }
550    
551        // Create a checksum option honoring user input as much as possible.
552        // If bytesPerChecksum is specified, it will override the one set in
553        // checksumOpt. Any missing value will be filled in using the default.
554        ChecksumOpt defaultOpt = new ChecksumOpt(
555            ssDef.getChecksumType(),
556            ssDef.getBytesPerChecksum());
557        checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
558            checksumOpt, bytesPerChecksum);
559    
560        if (bufferSize == -1) {
561          bufferSize = ssDef.getFileBufferSize();
562        }
563        if (replication == -1) {
564          replication = ssDef.getReplication();
565        }
566        if (createParent == null) {
567          createParent = false;
568        }
569    
570        if (blockSize % bytesPerChecksum != 0) {
571          throw new HadoopIllegalArgumentException(
             "blockSize should be a multiple of checksum size");
573        }
574    
575        return this.createInternal(f, createFlag, permission, bufferSize,
576          replication, blockSize, progress, checksumOpt, createParent);
577      }
578    
579      /**
580       * The specification of this method matches that of
581       * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
582       * have been declared explicitly.
583       */
584      public abstract FSDataOutputStream createInternal(Path f,
585          EnumSet<CreateFlag> flag, FsPermission absolutePermission,
586          int bufferSize, short replication, long blockSize, Progressable progress,
587          ChecksumOpt checksumOpt, boolean createParent)
588          throws AccessControlException, FileAlreadyExistsException,
589          FileNotFoundException, ParentNotDirectoryException,
590          UnsupportedFileSystemException, UnresolvedLinkException, IOException;
591    
592      /**
593       * The specification of this method matches that of
594       * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
595       * f must be fully qualified and the permission is absolute (i.e. 
596       * umask has been applied).
597       */
598      public abstract void mkdir(final Path dir, final FsPermission permission,
599          final boolean createParent) throws AccessControlException,
600          FileAlreadyExistsException, FileNotFoundException,
601          UnresolvedLinkException, IOException;
602    
603      /**
604       * The specification of this method matches that of
605       * {@link FileContext#delete(Path, boolean)} except that Path f must be for
606       * this file system.
607       */
608      public abstract boolean delete(final Path f, final boolean recursive)
609          throws AccessControlException, FileNotFoundException,
610          UnresolvedLinkException, IOException;
611    
612      /**
613       * The specification of this method matches that of
614       * {@link FileContext#open(Path)} except that Path f must be for this
615       * file system.
616       */
617      public FSDataInputStream open(final Path f) throws AccessControlException,
618          FileNotFoundException, UnresolvedLinkException, IOException {
619        return open(f, getServerDefaults().getFileBufferSize());
620      }
621    
622      /**
623       * The specification of this method matches that of
624       * {@link FileContext#open(Path, int)} except that Path f must be for this
625       * file system.
626       */
627      public abstract FSDataInputStream open(final Path f, int bufferSize)
628          throws AccessControlException, FileNotFoundException,
629          UnresolvedLinkException, IOException;
630    
631      /**
632       * The specification of this method matches that of
633       * {@link FileContext#setReplication(Path, short)} except that Path f must be
634       * for this file system.
635       */
636      public abstract boolean setReplication(final Path f,
637          final short replication) throws AccessControlException,
638          FileNotFoundException, UnresolvedLinkException, IOException;
639    
640      /**
641       * The specification of this method matches that of
642       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
643       * f must be for this file system.
644       */
645      public final void rename(final Path src, final Path dst,
646          final Options.Rename... options) throws AccessControlException,
647          FileAlreadyExistsException, FileNotFoundException,
648          ParentNotDirectoryException, UnresolvedLinkException, IOException {
649        boolean overwrite = false;
650        if (null != options) {
651          for (Rename option : options) {
652            if (option == Rename.OVERWRITE) {
653              overwrite = true;
654            }
655          }
656        }
657        renameInternal(src, dst, overwrite);
658      }
659      
660      /**
661       * The specification of this method matches that of
662       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
663       * f must be for this file system and NO OVERWRITE is performed.
664       * 
   * File systems that do not have built-in overwrite support need to implement
   * only this method; they can rely on the default implementation of the other
   * {@link #renameInternal(Path, Path, boolean)}.
668       */
669      public abstract void renameInternal(final Path src, final Path dst)
670          throws AccessControlException, FileAlreadyExistsException,
671          FileNotFoundException, ParentNotDirectoryException,
672          UnresolvedLinkException, IOException;
673      
674      /**
675       * The specification of this method matches that of
676       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
677       * f must be for this file system.
678       */
679      public void renameInternal(final Path src, final Path dst,
680          boolean overwrite) throws AccessControlException,
681          FileAlreadyExistsException, FileNotFoundException,
682          ParentNotDirectoryException, UnresolvedLinkException, IOException {
683        // Default implementation deals with overwrite in a non-atomic way
684        final FileStatus srcStatus = getFileLinkStatus(src);
685    
686        FileStatus dstStatus;
687        try {
688          dstStatus = getFileLinkStatus(dst);
689        } catch (IOException e) {
690          dstStatus = null;
691        }
692        if (dstStatus != null) {
693          if (dst.equals(src)) {
694            throw new FileAlreadyExistsException(
695                "The source "+src+" and destination "+dst+" are the same");
696          }
697          if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
698            throw new FileAlreadyExistsException(
699                "Cannot rename symlink "+src+" to its target "+dst);
700          }
701          // It's OK to rename a file to a symlink and vice versa
702          if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories or both be files");
705          }
706          if (!overwrite) {
707            throw new FileAlreadyExistsException("Rename destination " + dst
708                + " already exists.");
709          }
710          // Delete the destination that is a file or an empty directory
711          if (dstStatus.isDirectory()) {
712            RemoteIterator<FileStatus> list = listStatusIterator(dst);
713            if (list != null && list.hasNext()) {
714              throw new IOException(
715                  "Rename cannot overwrite non empty destination directory " + dst);
716            }
717          }
718          delete(dst, false);
719        } else {
720          final Path parent = dst.getParent();
721          final FileStatus parentStatus = getFileStatus(parent);
722          if (parentStatus.isFile()) {
723            throw new ParentNotDirectoryException("Rename destination parent "
724                + parent + " is a file.");
725          }
726        }
727        renameInternal(src, dst);
728      }
729      
730      /**
731       * Returns true if the file system supports symlinks, false otherwise.
732       * @return true if filesystem supports symlinks
733       */
734      public boolean supportsSymlinks() {
735        return false;
736      }
737      
738      /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
741       */
742      public void createSymlink(final Path target, final Path link,
743          final boolean createParent) throws IOException, UnresolvedLinkException {
744        throw new IOException("File system does not support symlinks");    
745      }
746    
747      /**
748       * Partially resolves the path. This is used during symlink resolution in
749       * {@link FSLinkResolver}, and differs from the similarly named method
750       * {@link FileContext#getLinkTarget(Path)}.
751       */
752      public Path getLinkTarget(final Path f) throws IOException {
753        /* We should never get here. Any file system that threw an
754         * UnresolvedLinkException, causing this function to be called,
755         * needs to override this method.
756         */
757        throw new AssertionError();
758      }
759        
760      /**
761       * The specification of this method matches that of
762       * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
763       * must be for this file system.
764       */
765      public abstract void setPermission(final Path f,
766          final FsPermission permission) throws AccessControlException,
767          FileNotFoundException, UnresolvedLinkException, IOException;
768    
769      /**
770       * The specification of this method matches that of
771       * {@link FileContext#setOwner(Path, String, String)} except that Path f must
772       * be for this file system.
773       */
774      public abstract void setOwner(final Path f, final String username,
775          final String groupname) throws AccessControlException,
776          FileNotFoundException, UnresolvedLinkException, IOException;
777    
778      /**
779       * The specification of this method matches that of
780       * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
781       * for this file system.
782       */
783      public abstract void setTimes(final Path f, final long mtime,
784        final long atime) throws AccessControlException, FileNotFoundException,
785          UnresolvedLinkException, IOException;
786    
787      /**
788       * The specification of this method matches that of
789       * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
790       * this file system.
791       */
792      public abstract FileChecksum getFileChecksum(final Path f)
793          throws AccessControlException, FileNotFoundException,
794          UnresolvedLinkException, IOException;
795      
796      /**
797       * The specification of this method matches that of
798       * {@link FileContext#getFileStatus(Path)} 
799       * except that an UnresolvedLinkException may be thrown if a symlink is 
800       * encountered in the path.
801       */
802      public abstract FileStatus getFileStatus(final Path f)
803          throws AccessControlException, FileNotFoundException,
804          UnresolvedLinkException, IOException;
805    
806      /**
807       * The specification of this method matches that of
808       * {@link FileContext#getFileLinkStatus(Path)}
809       * except that an UnresolvedLinkException may be thrown if a symlink is  
810       * encountered in the path leading up to the final path component.
811       * If the file system does not support symlinks then the behavior is
812       * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
813       */
814      public FileStatus getFileLinkStatus(final Path f)
815          throws AccessControlException, FileNotFoundException,
816          UnsupportedFileSystemException, IOException {
817        return getFileStatus(f);
818      }
819    
820      /**
821       * The specification of this method matches that of
822       * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
823       * Path f must be for this file system.
824       */
825      public abstract BlockLocation[] getFileBlockLocations(final Path f,
826          final long start, final long len) throws AccessControlException,
827          FileNotFoundException, UnresolvedLinkException, IOException;
828    
829      /**
830       * The specification of this method matches that of
831       * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
832       * file system.
833       */
834      public FsStatus getFsStatus(final Path f) throws AccessControlException,
835          FileNotFoundException, UnresolvedLinkException, IOException {
836        // default impl gets FsStatus of root
837        return getFsStatus();
838      }
839      
840      /**
841       * The specification of this method matches that of
842       * {@link FileContext#getFsStatus(Path)}.
843       */
844      public abstract FsStatus getFsStatus() throws AccessControlException,
845          FileNotFoundException, IOException;
846    
847      /**
848       * The specification of this method matches that of
849       * {@link FileContext#listStatus(Path)} except that Path f must be for this
850       * file system.
851       */
852      public RemoteIterator<FileStatus> listStatusIterator(final Path f)
853          throws AccessControlException, FileNotFoundException,
854          UnresolvedLinkException, IOException {
855        return new RemoteIterator<FileStatus>() {
856          private int i = 0;
857          private FileStatus[] statusList = listStatus(f);
858          
859          @Override
860          public boolean hasNext() {
861            return i < statusList.length;
862          }
863          
864          @Override
865          public FileStatus next() {
866            if (!hasNext()) {
867              throw new NoSuchElementException();
868            }
869            return statusList[i++];
870          }
871        };
872      }
873    
874      /**
875       * The specification of this method matches that of
876       * {@link FileContext#listLocatedStatus(Path)} except that Path f 
877       * must be for this file system.
878       */
879      public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
880          throws AccessControlException, FileNotFoundException,
881          UnresolvedLinkException, IOException {
882        return new RemoteIterator<LocatedFileStatus>() {
883          private RemoteIterator<FileStatus> itor = listStatusIterator(f);
884          
885          @Override
886          public boolean hasNext() throws IOException {
887            return itor.hasNext();
888          }
889          
890          @Override
891          public LocatedFileStatus next() throws IOException {
892            if (!hasNext()) {
          throw new NoSuchElementException("No more entries in " + f);
894            }
895            FileStatus result = itor.next();
896            BlockLocation[] locs = null;
897            if (result.isFile()) {
898              locs = getFileBlockLocations(
899                  result.getPath(), 0, result.getLen());
900            }
901            return new LocatedFileStatus(result, locs);
902          }
903        };
904      }
905    
906      /**
907       * The specification of this method matches that of
908       * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
909       * for this file system.
910       */
911      public abstract FileStatus[] listStatus(final Path f)
912          throws AccessControlException, FileNotFoundException,
913          UnresolvedLinkException, IOException;
914    
915      /**
   * List corrupt file blocks.
   * @param path the path under which to search for corrupt files
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException if the operation fails
919       */
920      public RemoteIterator<Path> listCorruptFileBlocks(Path path)
921        throws IOException {
922        throw new UnsupportedOperationException(getClass().getCanonicalName() +
923                                                " does not support" +
924                                                " listCorruptFileBlocks");
925      }
926    
927      /**
928       * The specification of this method matches that of
929       * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
930       * must be for this file system.
931       */
932      public abstract void setVerifyChecksum(final boolean verifyChecksum)
933          throws AccessControlException, IOException;
934      
935      /**
936       * Get a canonical name for this file system.
937       * @return a URI string that uniquely identifies this file system
938       */
939      public String getCanonicalServiceName() {
940        return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
941      }
942      
943      /**
   * Get one or more delegation tokens associated with the file system. Normally
   * a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath could return a set of delegation tokens
   * for all the file systems it manages.
948       * 
949       * @param renewer the account name that is allowed to renew the token.
950       * @return List of delegation tokens.
   *   If delegation tokens are not supported, a list of size zero is returned.
952       * @throws IOException
953       */
954      @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
955      public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
956        return new ArrayList<Token<?>>(0);
957      }
958      
959      @Override //Object
960      public int hashCode() {
961        return myUri.hashCode();
962      }
963      
964      @Override //Object
965      public boolean equals(Object other) {
    if (!(other instanceof AbstractFileSystem)) {
967          return false;
968        }
969        return myUri.equals(((AbstractFileSystem) other).myUri);
970      }
971    }