001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.fs;
019    
020    
021    import java.io.FileNotFoundException;
022    import java.io.IOException;
023    import java.lang.reflect.Constructor;
024    import java.net.URI;
025    import java.net.URISyntaxException;
026    import java.util.ArrayList;
027    import java.util.EnumSet;
028    import java.util.HashMap;
029    import java.util.List;
030    import java.util.Map;
031    import java.util.NoSuchElementException;
032    import java.util.StringTokenizer;
033    import java.util.concurrent.ConcurrentHashMap;
034    
035    import org.apache.commons.logging.Log;
036    import org.apache.commons.logging.LogFactory;
037    import org.apache.hadoop.HadoopIllegalArgumentException;
038    import org.apache.hadoop.classification.InterfaceAudience;
039    import org.apache.hadoop.classification.InterfaceStability;
040    import org.apache.hadoop.conf.Configuration;
041    import org.apache.hadoop.fs.FileSystem.Statistics;
042    import org.apache.hadoop.fs.Options.CreateOpts;
043    import org.apache.hadoop.fs.Options.Rename;
044    import org.apache.hadoop.fs.permission.FsPermission;
045    import org.apache.hadoop.fs.InvalidPathException;
046    import org.apache.hadoop.security.AccessControlException;
047    import org.apache.hadoop.security.SecurityUtil;
048    import org.apache.hadoop.security.token.Token;
049    import org.apache.hadoop.util.Progressable;
050    
051    /**
052     * This class provides an interface for implementors of a Hadoop file system
053     * (analogous to the VFS of Unix). Applications do not access this class;
054     * instead they access files across all file systems using {@link FileContext}.
055     * 
 * Pathnames passed to AbstractFileSystem can be a fully qualified URI that
 * matches this file system (i.e. same scheme and authority), or a
 * slash-relative name that is assumed to be relative to the root of this
 * file system.
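 * <p>
 * A minimal, illustrative sketch of how an application reaches this layer
 * through {@link FileContext} (the path and names are examples only):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * FileContext fc = FileContext.getFileContext(conf);
 * FSDataInputStream in = fc.open(new Path("/user/alice/data.txt"));
 * }</pre>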
060     */
061    @InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
063    public abstract class AbstractFileSystem {
064      static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
065    
  /** Recording statistics per file system URI (scheme and authority). */
067      private static final Map<URI, Statistics> 
068          STATISTICS_TABLE = new HashMap<URI, Statistics>();
069      
070      /** Cache of constructors for each file system class. */
071      private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
072        new ConcurrentHashMap<Class<?>, Constructor<?>>();
073      
074      private static final Class<?>[] URI_CONFIG_ARGS = 
075        new Class[]{URI.class, Configuration.class};
076      
077      /** The statistics for this file system. */
078      protected Statistics statistics;
079      
080      private final URI myUri;
081      
082      public Statistics getStatistics() {
083        return statistics;
084      }
085      
086      /**
   * Returns true only if no path component equals "." or ".." or contains
   * a ":".
088       */
089      private static boolean isValidName(String src) {
090        // Check for ".." "." ":" "/"
091        StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
092        while(tokens.hasMoreTokens()) {
093          String element = tokens.nextToken();
      if (element.equals("..") ||
095              element.equals(".")  ||
096              (element.indexOf(":") >= 0)) {
097            return false;
098          }
099        }
100        return true;
101      }
102      
103      /** 
104       * Create an object for the given class and initialize it from conf.
   * @param theClass class of which an object is created
   * @param uri URI passed to the constructor of theClass
   * @param conf Configuration passed to the constructor of theClass
107       * @return a new object
108       */
109      @SuppressWarnings("unchecked")
110      static <T> T newInstance(Class<T> theClass,
111        URI uri, Configuration conf) {
112        T result;
113        try {
114          Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
115          if (meth == null) {
116            meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
117            meth.setAccessible(true);
118            CONSTRUCTOR_CACHE.put(theClass, meth);
119          }
120          result = meth.newInstance(uri, conf);
121        } catch (Exception e) {
122          throw new RuntimeException(e);
123        }
124        return result;
125      }
126      
127      /**
128       * Create a file system instance for the specified uri using the conf. The
129       * conf is used to find the class name that implements the file system. The
130       * conf is also passed to the file system for its configuration.
131       *
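   * <p>
   * For example (illustrative only), an implementation for the {@code hdfs}
   * scheme would typically be selected via a configuration entry such as:
   * <pre>{@code
   * conf.set("fs.AbstractFileSystem.hdfs.impl", "org.apache.hadoop.fs.Hdfs");
   * }</pre>
   *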
132       * @param uri URI of the file system
133       * @param conf Configuration for the file system
134       * 
135       * @return Returns the file system for the given URI
136       *
137       * @throws UnsupportedFileSystemException file system for <code>uri</code> is
138       *           not found
139       */
140      public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
141          throws UnsupportedFileSystemException {
142        Class<?> clazz = conf.getClass("fs.AbstractFileSystem." + 
143                                    uri.getScheme() + ".impl", null);
144        if (clazz == null) {
145          throw new UnsupportedFileSystemException(
146              "No AbstractFileSystem for scheme: " + uri.getScheme());
147        }
148        return (AbstractFileSystem) newInstance(clazz, uri, conf);
149      }
150    
151      /**
152       * Get the statistics for a particular file system.
153       * 
   * @param uri
   *          used as a key to look up the STATISTICS_TABLE. Only the scheme
   *          and authority parts of the uri are used.
157       * @return a statistics object
158       */
159      protected static synchronized Statistics getStatistics(URI uri) {
160        String scheme = uri.getScheme();
161        if (scheme == null) {
162          throw new IllegalArgumentException("Scheme not defined in the uri: "
163              + uri);
164        }
165        URI baseUri = getBaseUri(uri);
166        Statistics result = STATISTICS_TABLE.get(baseUri);
167        if (result == null) {
168          result = new Statistics(scheme);
169          STATISTICS_TABLE.put(baseUri, result);
170        }
171        return result;
172      }
173      
174      private static URI getBaseUri(URI uri) {
175        String scheme = uri.getScheme();
176        String authority = uri.getAuthority();
177        String baseUriString = scheme + "://";
178        if (authority != null) {
179          baseUriString = baseUriString + authority;
180        } else {
181          baseUriString = baseUriString + "/";
182        }
183        return URI.create(baseUriString);
184      }
185      
186      public static synchronized void clearStatistics() {
187        for(Statistics stat: STATISTICS_TABLE.values()) {
188          stat.reset();
189        }
190      }
191    
192      /**
193       * Prints statistics for all file systems.
194       */
195      public static synchronized void printStatistics() {
196        for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
197          System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
198              + pair.getKey().getAuthority() + ": " + pair.getValue());
199        }
200      }
201      
202      protected static synchronized Map<URI, Statistics> getAllStatistics() {
203        Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
204            STATISTICS_TABLE.size());
205        for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
206          URI key = pair.getKey();
207          Statistics value = pair.getValue();
208          Statistics newStatsObj = new Statistics(value);
209          statsMap.put(URI.create(key.toString()), newStatsObj);
210        }
211        return statsMap;
212      }
213    
214      /**
215       * The main factory method for creating a file system. Get a file system for
216       * the URI's scheme and authority. The scheme of the <code>uri</code>
217       * determines a configuration property name,
218       * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
219       * AbstractFileSystem class.
220       * 
   * The entire URI and the conf are passed to the AbstractFileSystem factory
   * method.
222       * 
223       * @param uri for the file system to be created.
224       * @param conf which is passed to the file system impl.
225       * 
226       * @return file system for the given URI.
227       * 
228       * @throws UnsupportedFileSystemException if the file system for
229       *           <code>uri</code> is not supported.
230       */
231      public static AbstractFileSystem get(final URI uri, final Configuration conf)
232          throws UnsupportedFileSystemException {
233        return createFileSystem(uri, conf);
234      }
235    
236      /**
237       * Constructor to be called by subclasses.
238       * 
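   * A hypothetical subclass for a {@code myfs} scheme that requires an
   * authority and uses a default port of 9999 might invoke this constructor
   * as follows (the scheme and port are examples only):
   * <pre>{@code
   * super(uri, "myfs", true, 9999);
   * }</pre>
   *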
239       * @param uri for this file system.
240       * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI must have a null authority.
   * @param defaultPort default port to use if the port is not specified in
   *          the URI.
   *
244       * @throws URISyntaxException <code>uri</code> has syntax error
245       */
246      public AbstractFileSystem(final URI uri, final String supportedScheme,
247          final boolean authorityNeeded, final int defaultPort)
248          throws URISyntaxException {
249        myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
250        statistics = getStatistics(uri); 
251      }
252      
253      /**
   * Check that the scheme of the given Uri matches the supported scheme.
   * @param uri the Uri whose scheme is checked
   * @param supportedScheme the scheme supported by this file system
257       */
258      public void checkScheme(URI uri, String supportedScheme) {
259        String scheme = uri.getScheme();
260        if (scheme == null) {
261          throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
262        }
263        if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme of " + uri
          + " does not match the expected scheme " + supportedScheme);
266        }
267      }
268    
269      /**
   * Get the URI for the file system based on the given URI. The path and
   * query parts of the given URI are stripped out and the default file system
   * port is used to form the returned URI.
273       * 
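   * For example (illustrative values), {@code hdfs://host/some/path} with a
   * default port of 8020 is normalized to {@code hdfs://host:8020}.
   * 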
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme supported by this file system.
275       * @param authorityNeeded if true authority cannot be null in the URI. If
276       *          false authority must be null.
277       * @param defaultPort default port to use if port is not specified in the URI.
278       * 
279       * @return URI of the file system
280       * 
281       * @throws URISyntaxException <code>uri</code> has syntax error
282       */
283      private URI getUri(URI uri, String supportedScheme,
284          boolean authorityNeeded, int defaultPort) throws URISyntaxException {
285        checkScheme(uri, supportedScheme);
286        // A file system implementation that requires authority must always
287        // specify default port
288        if (defaultPort < 0 && authorityNeeded) {
289          throw new HadoopIllegalArgumentException(
290              "FileSystem implementation error -  default port " + defaultPort
291                  + " is not valid");
292        }
293        String authority = uri.getAuthority();
294        if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        return new URI(supportedScheme + ":///");
      }
300        }
    // authority is non-null - authorityNeeded may be true or false.
302        int port = uri.getPort();
303        port = (port == -1 ? defaultPort : port);
304        if (port == -1) { // no port supplied and default port is not specified
305          return new URI(supportedScheme, authority, "/", null);
306        }
307        return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
308      }
309      
310      /**
311       * The default port of this file system.
312       * 
   * @return default port of this file system's Uri scheme.
   *         A uri with a port of -1 maps to the default port.
315       */
316      public abstract int getUriDefaultPort();
317    
318      /**
319       * Returns a URI whose scheme and authority identify this FileSystem.
320       * 
321       * @return the uri of this file system.
322       */
323      public URI getUri() {
324        return myUri;
325      }
326      
327      /**
328       * Check that a Path belongs to this FileSystem.
329       * 
   * If the path is a fully qualified URI, then its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
333       * 
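   * For example (illustrative), for a file system whose URI is
   * {@code hdfs://nn1:8020}, the paths {@code hdfs://nn1:8020/foo} and
   * {@code /foo} pass this check, whereas {@code foo} (relative) and
   * {@code file:///foo} (different scheme) do not.
   * 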
334       * @throws InvalidPathException if the path is invalid
335       */
336      public void checkPath(Path path) {
337        URI uri = path.toUri();
338        String thatScheme = uri.getScheme();
339        String thatAuthority = uri.getAuthority();
340        if (thatScheme == null) {
341          if (thatAuthority == null) {
342            if (path.isUriPathAbsolute()) {
343              return;
344            }
        throw new InvalidPathException("relative paths not allowed: " +
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme but with non-null authority: " + path);
350          }
351        }
352        String thisScheme = this.getUri().getScheme();
353        String thisAuthority = this.getUri().getAuthority();
354        
355        // Schemes and authorities must match.
356        // Allow for null Authority for file:///
357        if (!thisScheme.equalsIgnoreCase(thatScheme) ||
358           (thisAuthority != null && 
359                !thisAuthority.equalsIgnoreCase(thatAuthority)) ||
360           (thisAuthority == null && thatAuthority != null)) {
361          throw new InvalidPathException("Wrong FS: " + path + ", expected: "
362              + this.getUri());
363        }
364        
365        int thisPort = this.getUri().getPort();
366        int thatPort = path.toUri().getPort();
367        if (thatPort == -1) { // -1 => defaultPort of Uri scheme
368          thatPort = this.getUriDefaultPort();
369        }
370        if (thisPort != thatPort) {
371          throw new InvalidPathException("Wrong FS: " + path + ", expected: "
372              + this.getUri());
373        }
374      }
375      
376      /**
377       * Get the path-part of a pathname. Checks that URI matches this file system
378       * and that the path-part is a valid name.
379       * 
380       * @param p path
381       * 
382       * @return path-part of the Path p
383       */
384      public String getUriPath(final Path p) {
385        checkPath(p);
386        String s = p.toUri().getPath();
387        if (!isValidName(s)) {
388          throw new InvalidPathException("Path part " + s + " from URI " + p
389              + " is not a valid filename.");
390        }
391        return s;
392      }
393      
394      /**
   * Make the path fully qualified with respect to this file system.
   * @param path the path to qualify
397       * @return the qualified path
398       */
399      public Path makeQualified(Path path) {
400        checkPath(path);
401        return path.makeQualified(this.getUri(), null);
402      }
403      
404      /**
   * Some file systems, like LocalFileSystem, have an initial workingDir
   * that is used as the starting workingDir. For other file systems,
   * like HDFS, there is no built-in notion of an initial workingDir.
   * 
   * @return the initial workingDir if the file system has such a notion,
   *         otherwise null.
411       */
412      public Path getInitialWorkingDirectory() {
413        return null;
414      }
415      
416      /** 
417       * Return the current user's home directory in this file system.
   * The default implementation returns "/user/$USER".
419       * 
420       * @return current user's home directory.
421       */
422      public Path getHomeDirectory() {
423        return new Path("/user/"+System.getProperty("user.name")).makeQualified(
424                                                                    getUri(), null);
425      }
426      
427      /**
428       * Return a set of server default configuration values.
429       * 
430       * @return server default configuration values
431       * 
432       * @throws IOException an I/O error occurred
433       */
434      public abstract FsServerDefaults getServerDefaults() throws IOException; 
435    
436      /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException
   * @throws AccessControlException
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   * @throws IOException
444       */
445       public Path resolvePath(final Path p) throws FileNotFoundException,
446               UnresolvedLinkException, AccessControlException, IOException {
447         checkPath(p);
448         return getFileStatus(p).getPath(); // default impl is to return the path
449       }
450      
451      /**
452       * The specification of this method matches that of
453       * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
454       * that the Path f must be fully qualified and the permission is absolute
455       * (i.e. umask has been applied).
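   * <p>
   * A minimal, illustrative invocation (the flags and option values are
   * examples only):
   * <pre>{@code
   * FSDataOutputStream out = fs.create(path,
   *     EnumSet.of(CreateFlag.CREATE),
   *     CreateOpts.perms(FsPermission.getDefault()),
   *     CreateOpts.createParent());
   * }</pre>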
456       */
457      public final FSDataOutputStream create(final Path f,
458          final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
459          throws AccessControlException, FileAlreadyExistsException,
460          FileNotFoundException, ParentNotDirectoryException,
461          UnsupportedFileSystemException, UnresolvedLinkException, IOException {
462        checkPath(f);
463        int bufferSize = -1;
464        short replication = -1;
465        long blockSize = -1;
466        int bytesPerChecksum = -1;
467        FsPermission permission = null;
468        Progressable progress = null;
469        Boolean createParent = null;
470     
471        for (CreateOpts iOpt : opts) {
472          if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
473            if (blockSize != -1) {
474              throw new HadoopIllegalArgumentException(
475                  "BlockSize option is set multiple times");
476            }
477            blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
478          } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
479            if (bufferSize != -1) {
480              throw new HadoopIllegalArgumentException(
481                  "BufferSize option is set multiple times");
482            }
483            bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
484          } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
485            if (replication != -1) {
486              throw new HadoopIllegalArgumentException(
487                  "ReplicationFactor option is set multiple times");
488            }
489            replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
490          } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
491            if (bytesPerChecksum != -1) {
492              throw new HadoopIllegalArgumentException(
493                  "BytesPerChecksum option is set multiple times");
494            }
495            bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
496          } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
497            if (permission != null) {
498              throw new HadoopIllegalArgumentException(
499                  "Perms option is set multiple times");
500            }
501            permission = ((CreateOpts.Perms) iOpt).getValue();
502          } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
503            if (progress != null) {
504              throw new HadoopIllegalArgumentException(
505                  "Progress option is set multiple times");
506            }
507            progress = ((CreateOpts.Progress) iOpt).getValue();
508          } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
509            if (createParent != null) {
510              throw new HadoopIllegalArgumentException(
511                  "CreateParent option is set multiple times");
512            }
513            createParent = ((CreateOpts.CreateParent) iOpt).getValue();
514          } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
516                iOpt.getClass().getName());
517          }
518        }
519        if (permission == null) {
520          throw new HadoopIllegalArgumentException("no permission supplied");
521        }
522    
523    
524        FsServerDefaults ssDef = getServerDefaults();
525        if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
526          throw new IOException("Internal error: default blockSize is" + 
527              " not a multiple of default bytesPerChecksum ");
528        }
529        
530        if (blockSize == -1) {
531          blockSize = ssDef.getBlockSize();
532        }
533        if (bytesPerChecksum == -1) {
534          bytesPerChecksum = ssDef.getBytesPerChecksum();
535        }
536        if (bufferSize == -1) {
537          bufferSize = ssDef.getFileBufferSize();
538        }
539        if (replication == -1) {
540          replication = ssDef.getReplication();
541        }
542        if (createParent == null) {
543          createParent = false;
544        }
545    
546        if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
          "blockSize should be a multiple of checksum size");
549        }
550    
551        return this.createInternal(f, createFlag, permission, bufferSize,
552          replication, blockSize, progress, bytesPerChecksum, createParent);
553      }
554    
555      /**
556       * The specification of this method matches that of
557       * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
558       * have been declared explicitly.
559       */
560      public abstract FSDataOutputStream createInternal(Path f,
561          EnumSet<CreateFlag> flag, FsPermission absolutePermission,
562          int bufferSize, short replication, long blockSize, Progressable progress,
563          int bytesPerChecksum, boolean createParent)
564          throws AccessControlException, FileAlreadyExistsException,
565          FileNotFoundException, ParentNotDirectoryException,
566          UnsupportedFileSystemException, UnresolvedLinkException, IOException;
567    
568      /**
569       * The specification of this method matches that of
570       * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
571       * f must be fully qualified and the permission is absolute (i.e. 
572       * umask has been applied).
573       */
574      public abstract void mkdir(final Path dir, final FsPermission permission,
575          final boolean createParent) throws AccessControlException,
576          FileAlreadyExistsException, FileNotFoundException,
577          UnresolvedLinkException, IOException;
578    
579      /**
580       * The specification of this method matches that of
581       * {@link FileContext#delete(Path, boolean)} except that Path f must be for
582       * this file system.
583       */
584      public abstract boolean delete(final Path f, final boolean recursive)
585          throws AccessControlException, FileNotFoundException,
586          UnresolvedLinkException, IOException;
587    
588      /**
589       * The specification of this method matches that of
590       * {@link FileContext#open(Path)} except that Path f must be for this
591       * file system.
592       */
593      public FSDataInputStream open(final Path f) throws AccessControlException,
594          FileNotFoundException, UnresolvedLinkException, IOException {
595        return open(f, getServerDefaults().getFileBufferSize());
596      }
597    
598      /**
599       * The specification of this method matches that of
600       * {@link FileContext#open(Path, int)} except that Path f must be for this
601       * file system.
602       */
603      public abstract FSDataInputStream open(final Path f, int bufferSize)
604          throws AccessControlException, FileNotFoundException,
605          UnresolvedLinkException, IOException;
606    
607      /**
608       * The specification of this method matches that of
609       * {@link FileContext#setReplication(Path, short)} except that Path f must be
610       * for this file system.
611       */
612      public abstract boolean setReplication(final Path f,
613          final short replication) throws AccessControlException,
614          FileNotFoundException, UnresolvedLinkException, IOException;
615    
616      /**
617       * The specification of this method matches that of
618       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
619       * f must be for this file system.
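   * <p>
   * For example, {@code fs.rename(src, dst, Options.Rename.OVERWRITE)} may
   * replace an existing file or empty-directory destination, while
   * {@code fs.rename(src, dst)} fails if the destination already exists.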
620       */
621      public final void rename(final Path src, final Path dst,
622          final Options.Rename... options) throws AccessControlException,
623          FileAlreadyExistsException, FileNotFoundException,
624          ParentNotDirectoryException, UnresolvedLinkException, IOException {
625        boolean overwrite = false;
626        if (null != options) {
627          for (Rename option : options) {
628            if (option == Rename.OVERWRITE) {
629              overwrite = true;
630            }
631          }
632        }
633        renameInternal(src, dst, overwrite);
634      }
635      
636      /**
637       * The specification of this method matches that of
638       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
639       * f must be for this file system and NO OVERWRITE is performed.
640       * 
   * File systems that do not have a built-in overwrite need to implement only
   * this method and can take advantage of the default implementation of the
   * other {@link #renameInternal(Path, Path, boolean)}.
644       */
645      public abstract void renameInternal(final Path src, final Path dst)
646          throws AccessControlException, FileAlreadyExistsException,
647          FileNotFoundException, ParentNotDirectoryException,
648          UnresolvedLinkException, IOException;
649      
650      /**
651       * The specification of this method matches that of
652       * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
653       * f must be for this file system.
654       */
655      public void renameInternal(final Path src, final Path dst,
656          boolean overwrite) throws AccessControlException,
657          FileAlreadyExistsException, FileNotFoundException,
658          ParentNotDirectoryException, UnresolvedLinkException, IOException {
659        // Default implementation deals with overwrite in a non-atomic way
660        final FileStatus srcStatus = getFileLinkStatus(src);
661    
662        FileStatus dstStatus;
663        try {
664          dstStatus = getFileLinkStatus(dst);
665        } catch (IOException e) {
666          dstStatus = null;
667        }
668        if (dstStatus != null) {
669          if (dst.equals(src)) {
670            throw new FileAlreadyExistsException(
671                "The source "+src+" and destination "+dst+" are the same");
672          }
673          if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
674            throw new FileAlreadyExistsException(
675                "Cannot rename symlink "+src+" to its target "+dst);
676          }
677          // It's OK to rename a file to a symlink and vice versa
678          if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories or both be files");
681          }
682          if (!overwrite) {
683            throw new FileAlreadyExistsException("Rename destination " + dst
684                + " already exists.");
685          }
686          // Delete the destination that is a file or an empty directory
687          if (dstStatus.isDirectory()) {
688            RemoteIterator<FileStatus> list = listStatusIterator(dst);
689            if (list != null && list.hasNext()) {
690              throw new IOException(
691                  "Rename cannot overwrite non empty destination directory " + dst);
692            }
693          }
694          delete(dst, false);
695        } else {
696          final Path parent = dst.getParent();
697          final FileStatus parentStatus = getFileStatus(parent);
698          if (parentStatus.isFile()) {
699            throw new ParentNotDirectoryException("Rename destination parent "
700                + parent + " is a file.");
701          }
702        }
703        renameInternal(src, dst);
704      }
705      
706      /**
707       * Returns true if the file system supports symlinks, false otherwise.
708       */
709      public boolean supportsSymlinks() {
710        return false;
711      }
712      
713      /**
714       * The specification of this method matches that of  
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
716       */
717      public void createSymlink(final Path target, final Path link,
718          final boolean createParent) throws IOException, UnresolvedLinkException {
719        throw new IOException("File system does not support symlinks");    
720      }
721    
722      /**
723       * The specification of this method matches that of  
   * {@link FileContext#getLinkTarget(Path)}.
725       */
726      public Path getLinkTarget(final Path f) throws IOException {
727        /* We should never get here. Any file system that threw an
728         * UnresolvedLinkException, causing this function to be called,
729         * needs to override this method.
730         */
731        throw new AssertionError();
732      }
733        
734      /**
735       * The specification of this method matches that of
736       * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
737       * must be for this file system.
738       */
739      public abstract void setPermission(final Path f,
740          final FsPermission permission) throws AccessControlException,
741          FileNotFoundException, UnresolvedLinkException, IOException;
742    
743      /**
744       * The specification of this method matches that of
745       * {@link FileContext#setOwner(Path, String, String)} except that Path f must
746       * be for this file system.
747       */
748      public abstract void setOwner(final Path f, final String username,
749          final String groupname) throws AccessControlException,
750          FileNotFoundException, UnresolvedLinkException, IOException;
751    
752      /**
753       * The specification of this method matches that of
754       * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
755       * for this file system.
756       */
757      public abstract void setTimes(final Path f, final long mtime,
758        final long atime) throws AccessControlException, FileNotFoundException,
759          UnresolvedLinkException, IOException;
760    
761      /**
762       * The specification of this method matches that of
763       * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
764       * this file system.
765       */
766      public abstract FileChecksum getFileChecksum(final Path f)
767          throws AccessControlException, FileNotFoundException,
768          UnresolvedLinkException, IOException;
769      
770      /**
771       * The specification of this method matches that of
772       * {@link FileContext#getFileStatus(Path)} 
773       * except that an UnresolvedLinkException may be thrown if a symlink is 
774       * encountered in the path.
775       */
776      public abstract FileStatus getFileStatus(final Path f)
777          throws AccessControlException, FileNotFoundException,
778          UnresolvedLinkException, IOException;
779    
780      /**
781       * The specification of this method matches that of
782       * {@link FileContext#getFileLinkStatus(Path)}
783       * except that an UnresolvedLinkException may be thrown if a symlink is  
784       * encountered in the path leading up to the final path component.
785       * If the file system does not support symlinks then the behavior is
786       * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
787       */
788      public FileStatus getFileLinkStatus(final Path f)
789          throws AccessControlException, FileNotFoundException,
790          UnsupportedFileSystemException, IOException {
791        return getFileStatus(f);
792      }
793    
794      /**
795       * The specification of this method matches that of
796       * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
797       * Path f must be for this file system.
798       */
799      public abstract BlockLocation[] getFileBlockLocations(final Path f,
800          final long start, final long len) throws AccessControlException,
801          FileNotFoundException, UnresolvedLinkException, IOException;
802    
803      /**
804       * The specification of this method matches that of
805       * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
806       * file system.
807       */
808      public FsStatus getFsStatus(final Path f) throws AccessControlException,
809          FileNotFoundException, UnresolvedLinkException, IOException {
810        // default impl gets FsStatus of root
811        return getFsStatus();
812      }
813      
814      /**
815       * The specification of this method matches that of
816       * {@link FileContext#getFsStatus(Path)}.
817       */
818      public abstract FsStatus getFsStatus() throws AccessControlException,
819          FileNotFoundException, IOException;
820    
821      /**
822       * The specification of this method matches that of
823       * {@link FileContext#listStatus(Path)} except that Path f must be for this
824       * file system.
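   * <p>
   * A typical consumption pattern (illustrative):
   * <pre>{@code
   * RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
   * while (it.hasNext()) {
   *   FileStatus status = it.next();
   *   // process status
   * }
   * }</pre>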
825       */
826      public RemoteIterator<FileStatus> listStatusIterator(final Path f)
827          throws AccessControlException, FileNotFoundException,
828          UnresolvedLinkException, IOException {
829        return new RemoteIterator<FileStatus>() {
830          private int i = 0;
831          private FileStatus[] statusList = listStatus(f);
832          
833          @Override
834          public boolean hasNext() {
835            return i < statusList.length;
836          }
837          
838          @Override
839          public FileStatus next() {
840            if (!hasNext()) {
841              throw new NoSuchElementException();
842            }
843            return statusList[i++];
844          }
845        };
846      }
847    
848      /**
849       * The specification of this method matches that of
850       * {@link FileContext#listLocatedStatus(Path)} except that Path f 
851       * must be for this file system.
852       */
853      public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
854          throws AccessControlException, FileNotFoundException,
855          UnresolvedLinkException, IOException {
856        return new RemoteIterator<LocatedFileStatus>() {
857          private RemoteIterator<FileStatus> itor = listStatusIterator(f);
858          
859          @Override
860          public boolean hasNext() throws IOException {
861            return itor.hasNext();
862          }
863          
864          @Override
865          public LocatedFileStatus next() throws IOException {
866            if (!hasNext()) {
867              throw new NoSuchElementException("No more entry in " + f);
868            }
869            FileStatus result = itor.next();
870            BlockLocation[] locs = null;
871            if (result.isFile()) {
872              locs = getFileBlockLocations(
873                  result.getPath(), 0, result.getLen());
874            }
875            return new LocatedFileStatus(result, locs);
876          }
877        };
878      }
879    
880      /**
881       * The specification of this method matches that of
882       * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
883       * for this file system.
884       */
885      public abstract FileStatus[] listStatus(final Path f)
886          throws AccessControlException, FileNotFoundException,
887          UnresolvedLinkException, IOException;
888    
889      /**
   * @param path the path under which to search for corrupt files
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
892       * @throws IOException
893       */
894      public RemoteIterator<Path> listCorruptFileBlocks(Path path)
895        throws IOException {
896        throw new UnsupportedOperationException(getClass().getCanonicalName() +
897                                                " does not support" +
898                                                " listCorruptFileBlocks");
899      }
900    
901      /**
902       * The specification of this method matches that of
903       * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
904       * must be for this file system.
905       */
906      public abstract void setVerifyChecksum(final boolean verifyChecksum)
907          throws AccessControlException, IOException;
908      
909      /**
910       * Get a canonical name for this file system.
911       * @return a URI string that uniquely identifies this file system
912       */
913      public String getCanonicalServiceName() {
914        return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
915      }
916      
917      /**
   * Get one or more delegation tokens associated with the file system. Normally
   * a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath could return a set of delegation tokens
   * for all the file systems it manages.
922       * 
923       * @param renewer the account name that is allowed to renew the token.
924       * @return List of delegation tokens.
   *   If delegation tokens are not supported, a list of size zero is returned.
926       * @throws IOException
927       */
928      @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
929      public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
930        return new ArrayList<Token<?>>(0);
931      }
932      
933      @Override //Object
934      public int hashCode() {
935        return myUri.hashCode();
936      }
937      
938      @Override //Object
939      public boolean equals(Object other) {
    if (!(other instanceof AbstractFileSystem)) {
941          return false;
942        }
943        return myUri.equals(((AbstractFileSystem) other).myUri);
944      }
945    }