001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.fs;
019
020
021import java.io.FileNotFoundException;
022import java.io.IOException;
023import java.lang.reflect.Constructor;
024import java.net.URI;
025import java.net.URISyntaxException;
026import java.util.ArrayList;
027import java.util.EnumSet;
028import java.util.HashMap;
029import java.util.List;
030import java.util.Map;
031import java.util.NoSuchElementException;
032import java.util.StringTokenizer;
033import java.util.concurrent.ConcurrentHashMap;
034
035import org.apache.commons.logging.Log;
036import org.apache.commons.logging.LogFactory;
037import org.apache.hadoop.HadoopIllegalArgumentException;
038import org.apache.hadoop.classification.InterfaceAudience;
039import org.apache.hadoop.classification.InterfaceStability;
040import org.apache.hadoop.conf.Configuration;
041import org.apache.hadoop.fs.FileSystem.Statistics;
042import org.apache.hadoop.fs.Options.ChecksumOpt;
043import org.apache.hadoop.fs.Options.CreateOpts;
044import org.apache.hadoop.fs.Options.Rename;
045import org.apache.hadoop.fs.permission.FsPermission;
046import org.apache.hadoop.fs.InvalidPathException;
047import org.apache.hadoop.security.AccessControlException;
048import org.apache.hadoop.security.SecurityUtil;
049import org.apache.hadoop.security.token.Token;
050import org.apache.hadoop.util.Progressable;
051
052/**
053 * This class provides an interface for implementors of a Hadoop file system
054 * (analogous to the VFS of Unix). Applications do not access this class;
055 * instead they access files across all file systems using {@link FileContext}.
056 * 
 * Pathnames passed to AbstractFileSystem can be fully-qualified URIs that
 * match the "this" file system (i.e. same scheme and authority)
 * or slash-relative names that are assumed to be relative
 * to the root of the "this" file system.
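 * <p>
 * Applications typically reach a concrete AbstractFileSystem indirectly
 * through {@link FileContext}; a minimal illustrative sketch (the URI and
 * path shown are made up):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * FileContext fc = FileContext.getFileContext(URI.create("file:///"), conf);
 * FileStatus status = fc.getFileStatus(new Path("/tmp"));
 * }</pre>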
061 */
062@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
064public abstract class AbstractFileSystem {
065  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
066
  /** Statistics recorded per file system, keyed by the scheme and authority of its URI. */
068  private static final Map<URI, Statistics> 
069      STATISTICS_TABLE = new HashMap<URI, Statistics>();
070  
071  /** Cache of constructors for each file system class. */
072  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
073    new ConcurrentHashMap<Class<?>, Constructor<?>>();
074  
075  private static final Class<?>[] URI_CONFIG_ARGS = 
076    new Class[]{URI.class, Configuration.class};
077  
078  /** The statistics for this file system. */
079  protected Statistics statistics;
080  
081  private final URI myUri;
082  
083  public Statistics getStatistics() {
084    return statistics;
085  }
086  
087  /**
   * Prohibits path elements that equal "." or ".." or that contain a ":".
089   */
090  private static boolean isValidName(String src) {
091    // Check for ".." "." ":" "/"
092    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
093    while(tokens.hasMoreTokens()) {
094      String element = tokens.nextToken();
      if (element.equals("..") ||
096          element.equals(".")  ||
097          (element.indexOf(":") >= 0)) {
098        return false;
099      }
100    }
101    return true;
102  }
103  
  /** 
   * Create an object for the given class and initialize it from conf.
   * @param theClass class of which an object is created
   * @param uri URI of the file system
   * @param conf Configuration
   * @return a new object
   */
110  @SuppressWarnings("unchecked")
111  static <T> T newInstance(Class<T> theClass,
112    URI uri, Configuration conf) {
113    T result;
114    try {
115      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
116      if (meth == null) {
117        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
118        meth.setAccessible(true);
119        CONSTRUCTOR_CACHE.put(theClass, meth);
120      }
121      result = meth.newInstance(uri, conf);
122    } catch (Exception e) {
123      throw new RuntimeException(e);
124    }
125    return result;
126  }
127  
128  /**
129   * Create a file system instance for the specified uri using the conf. The
130   * conf is used to find the class name that implements the file system. The
131   * conf is also passed to the file system for its configuration.
132   *
133   * @param uri URI of the file system
134   * @param conf Configuration for the file system
135   * 
136   * @return Returns the file system for the given URI
137   *
138   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
139   *           not found
140   */
141  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
142      throws UnsupportedFileSystemException {
143    Class<?> clazz = conf.getClass("fs.AbstractFileSystem." + 
144                                uri.getScheme() + ".impl", null);
145    if (clazz == null) {
146      throw new UnsupportedFileSystemException(
147          "No AbstractFileSystem for scheme: " + uri.getScheme());
148    }
149    return (AbstractFileSystem) newInstance(clazz, uri, conf);
150  }
151
152  /**
153   * Get the statistics for a particular file system.
154   * 
155   * @param uri
   *          used as a key to look up STATISTICS_TABLE. Only the scheme and
   *          authority parts of the uri are used.
158   * @return a statistics object
159   */
160  protected static synchronized Statistics getStatistics(URI uri) {
161    String scheme = uri.getScheme();
162    if (scheme == null) {
163      throw new IllegalArgumentException("Scheme not defined in the uri: "
164          + uri);
165    }
166    URI baseUri = getBaseUri(uri);
167    Statistics result = STATISTICS_TABLE.get(baseUri);
168    if (result == null) {
169      result = new Statistics(scheme);
170      STATISTICS_TABLE.put(baseUri, result);
171    }
172    return result;
173  }
174  
175  private static URI getBaseUri(URI uri) {
176    String scheme = uri.getScheme();
177    String authority = uri.getAuthority();
178    String baseUriString = scheme + "://";
179    if (authority != null) {
180      baseUriString = baseUriString + authority;
181    } else {
182      baseUriString = baseUriString + "/";
183    }
184    return URI.create(baseUriString);
185  }
186  
187  public static synchronized void clearStatistics() {
188    for(Statistics stat: STATISTICS_TABLE.values()) {
189      stat.reset();
190    }
191  }
192
193  /**
194   * Prints statistics for all file systems.
195   */
196  public static synchronized void printStatistics() {
197    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
198      System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
199          + pair.getKey().getAuthority() + ": " + pair.getValue());
200    }
201  }
202  
203  protected static synchronized Map<URI, Statistics> getAllStatistics() {
204    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
205        STATISTICS_TABLE.size());
206    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
207      URI key = pair.getKey();
208      Statistics value = pair.getValue();
209      Statistics newStatsObj = new Statistics(value);
210      statsMap.put(URI.create(key.toString()), newStatsObj);
211    }
212    return statsMap;
213  }
214
215  /**
216   * The main factory method for creating a file system. Get a file system for
217   * the URI's scheme and authority. The scheme of the <code>uri</code>
218   * determines a configuration property name,
219   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
220   * AbstractFileSystem class.
221   * 
   * The entire URI and the conf are passed to the AbstractFileSystem factory
   * method.
223   * 
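   * <p>
   * An illustrative sketch, assuming the default mapping of the "hdfs"
   * scheme in <tt>core-default.xml</tt> and a made-up NameNode address:
   * <pre>{@code
   * // fs.AbstractFileSystem.hdfs.impl = org.apache.hadoop.fs.Hdfs
   * Configuration conf = new Configuration();
   * AbstractFileSystem fs =
   *     AbstractFileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
   * }</pre>
   * 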
224   * @param uri for the file system to be created.
225   * @param conf which is passed to the file system impl.
226   * 
227   * @return file system for the given URI.
228   * 
229   * @throws UnsupportedFileSystemException if the file system for
230   *           <code>uri</code> is not supported.
231   */
232  public static AbstractFileSystem get(final URI uri, final Configuration conf)
233      throws UnsupportedFileSystemException {
234    return createFileSystem(uri, conf);
235  }
236
237  /**
238   * Constructor to be called by subclasses.
239   * 
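   * <p>
   * For illustration, a hypothetical subclass (scheme name and default port
   * are made up) might chain to this constructor as follows:
   * <pre>{@code
   * public MyFs(URI uri, Configuration conf) throws URISyntaxException {
   *   super(uri, "myfs", true, 9999); // authority required, default port 9999
   * }
   * }</pre>
   * 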
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI must have a null authority.
   * @param defaultPort default port to use if the port is not specified in
   *          the URI.
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
246   */
247  public AbstractFileSystem(final URI uri, final String supportedScheme,
248      final boolean authorityNeeded, final int defaultPort)
249      throws URISyntaxException {
250    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
251    statistics = getStatistics(uri); 
252  }
253  
  /**
   * Check that the scheme of the URI matches the supported scheme.
   * @param uri the URI to check
   * @param supportedScheme the scheme supported by this file system
   */
259  public void checkScheme(URI uri, String supportedScheme) {
260    String scheme = uri.getScheme();
261    if (scheme == null) {
262      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
263    }
264    if (!scheme.equals(supportedScheme)) {
265      throw new HadoopIllegalArgumentException("Uri scheme " + uri
266          + " does not match the scheme " + supportedScheme);
267    }
268  }
269
270  /**
   * Get the URI for this file system based on the given URI. The path and
   * query parts of the given URI are stripped out, and the default file
   * system port is used to form the URI.
   * 
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme supported by the implementor.
   * @param authorityNeeded if true the authority cannot be null in the URI.
   *          If false the authority must be null.
   * @param defaultPort default port to use if the port is not specified in
   *          the URI.
279   * 
280   * @return URI of the file system
281   * 
282   * @throws URISyntaxException <code>uri</code> has syntax error
283   */
284  private URI getUri(URI uri, String supportedScheme,
285      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
286    checkScheme(uri, supportedScheme);
287    // A file system implementation that requires authority must always
288    // specify default port
289    if (defaultPort < 0 && authorityNeeded) {
290      throw new HadoopIllegalArgumentException(
291          "FileSystem implementation error -  default port " + defaultPort
292              + " is not valid");
293    }
294    String authority = uri.getAuthority();
295    if (authority == null) {
296       if (authorityNeeded) {
297         throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
298       } else {
299         return new URI(supportedScheme + ":///");
300       }   
301    }
    // authority is non-null - authorityNeeded may be true or false.
303    int port = uri.getPort();
304    port = (port == -1 ? defaultPort : port);
305    if (port == -1) { // no port supplied and default port is not specified
306      return new URI(supportedScheme, authority, "/", null);
307    }
308    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
309  }
310  
311  /**
312   * The default port of this file system.
313   * 
   * @return the default port of this file system's URI scheme;
   *         a URI with a port of -1 maps to the default port.
316   */
317  public abstract int getUriDefaultPort();
318
319  /**
320   * Returns a URI whose scheme and authority identify this FileSystem.
321   * 
322   * @return the uri of this file system.
323   */
324  public URI getUri() {
325    return myUri;
326  }
327  
328  /**
329   * Check that a Path belongs to this FileSystem.
330   * 
   * If the path is a fully-qualified URI, its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
334   * 
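   * <p>
   * For example, for a file system whose URI is the illustrative
   * <tt>hdfs://namenode:8020</tt>:
   * <pre>{@code
   * fs.checkPath(new Path("hdfs://namenode:8020/user/alice")); // OK
   * fs.checkPath(new Path("/user/alice"));   // OK, slash-relative
   * fs.checkPath(new Path("file:///tmp"));   // throws InvalidPathException
   * }</pre>
   * 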
335   * @throws InvalidPathException if the path is invalid
336   */
337  public void checkPath(Path path) {
338    URI uri = path.toUri();
339    String thatScheme = uri.getScheme();
340    String thatAuthority = uri.getAuthority();
341    if (thatScheme == null) {
342      if (thatAuthority == null) {
343        if (path.isUriPathAbsolute()) {
344          return;
345        }
346        throw new InvalidPathException("relative paths not allowed:" + 
347            path);
348      } else {
349        throw new InvalidPathException(
350            "Path without scheme with non-null authority:" + path);
351      }
352    }
353    String thisScheme = this.getUri().getScheme();
354    String thisHost = this.getUri().getHost();
355    String thatHost = uri.getHost();
356    
357    // Schemes and hosts must match.
358    // Allow for null Authority for file:///
359    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
360       (thisHost != null && 
361            !thisHost.equalsIgnoreCase(thatHost)) ||
362       (thisHost == null && thatHost != null)) {
363      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
364          + this.getUri());
365    }
366    
367    // Ports must match, unless this FS instance is using the default port, in
368    // which case the port may be omitted from the given URI
369    int thisPort = this.getUri().getPort();
370    int thatPort = uri.getPort();
371    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
372      thatPort = this.getUriDefaultPort();
373    }
374    if (thisPort != thatPort) {
375      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
376          + this.getUri());
377    }
378  }
379  
380  /**
381   * Get the path-part of a pathname. Checks that URI matches this file system
382   * and that the path-part is a valid name.
383   * 
384   * @param p path
385   * 
386   * @return path-part of the Path p
387   */
388  public String getUriPath(final Path p) {
389    checkPath(p);
390    String s = p.toUri().getPath();
391    if (!isValidName(s)) {
392      throw new InvalidPathException("Path part " + s + " from URI " + p
393          + " is not a valid filename.");
394    }
395    return s;
396  }
397  
398  /**
   * Make the path fully qualified with respect to this file system.
   * @param path the path to qualify
   * @return the qualified path
402   */
403  public Path makeQualified(Path path) {
404    checkPath(path);
405    return path.makeQualified(this.getUri(), null);
406  }
407  
408  /**
   * Some file systems, like LocalFileSystem, have an initial workingDir
   * that is used as the starting workingDir. For other file systems,
   * like HDFS, there is no built-in notion of an initial workingDir.
   * 
   * @return the initial workingDir if the file system has such a notion;
   *         otherwise return null.
415   */
416  public Path getInitialWorkingDirectory() {
417    return null;
418  }
419  
420  /** 
421   * Return the current user's home directory in this file system.
422   * The default implementation returns "/user/$USER/".
423   * 
424   * @return current user's home directory.
425   */
426  public Path getHomeDirectory() {
427    return new Path("/user/"+System.getProperty("user.name")).makeQualified(
428                                                                getUri(), null);
429  }
430  
431  /**
432   * Return a set of server default configuration values.
433   * 
434   * @return server default configuration values
435   * 
436   * @throws IOException an I/O error occurred
437   */
438  public abstract FsServerDefaults getServerDefaults() throws IOException; 
439
440  /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException
   * @throws AccessControlException
   * @throws IOException
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
448   */
449   public Path resolvePath(final Path p) throws FileNotFoundException,
450           UnresolvedLinkException, AccessControlException, IOException {
451     checkPath(p);
452     return getFileStatus(p).getPath(); // default impl is to return the path
453   }
454  
455  /**
456   * The specification of this method matches that of
457   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
458   * that the Path f must be fully qualified and the permission is absolute
459   * (i.e. umask has been applied).
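   * <p>
   * A sketch of a typical call; options that are not supplied fall back to
   * this file system's server defaults, and {@code fs} stands for a concrete
   * instance:
   * <pre>{@code
   * FSDataOutputStream out = fs.create(
   *     fs.makeQualified(new Path("/tmp/data")),
   *     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *     CreateOpts.perms(FsPermission.getDefault()),
   *     CreateOpts.blockSize(128L * 1024 * 1024));
   * }</pre>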
460   */
461  public final FSDataOutputStream create(final Path f,
462      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
463      throws AccessControlException, FileAlreadyExistsException,
464      FileNotFoundException, ParentNotDirectoryException,
465      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
466    checkPath(f);
467    int bufferSize = -1;
468    short replication = -1;
469    long blockSize = -1;
470    int bytesPerChecksum = -1;
471    ChecksumOpt checksumOpt = null;
472    FsPermission permission = null;
473    Progressable progress = null;
474    Boolean createParent = null;
475 
476    for (CreateOpts iOpt : opts) {
477      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
478        if (blockSize != -1) {
479          throw new HadoopIllegalArgumentException(
480              "BlockSize option is set multiple times");
481        }
482        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
483      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
484        if (bufferSize != -1) {
485          throw new HadoopIllegalArgumentException(
486              "BufferSize option is set multiple times");
487        }
488        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
489      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
490        if (replication != -1) {
491          throw new HadoopIllegalArgumentException(
492              "ReplicationFactor option is set multiple times");
493        }
494        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
495      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
496        if (bytesPerChecksum != -1) {
497          throw new HadoopIllegalArgumentException(
498              "BytesPerChecksum option is set multiple times");
499        }
500        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
501      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
502        if (checksumOpt != null) {
503          throw new  HadoopIllegalArgumentException(
504              "CreateChecksumType option is set multiple times");
505        }
506        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
507      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
508        if (permission != null) {
509          throw new HadoopIllegalArgumentException(
510              "Perms option is set multiple times");
511        }
512        permission = ((CreateOpts.Perms) iOpt).getValue();
513      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
514        if (progress != null) {
515          throw new HadoopIllegalArgumentException(
516              "Progress option is set multiple times");
517        }
518        progress = ((CreateOpts.Progress) iOpt).getValue();
519      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
520        if (createParent != null) {
521          throw new HadoopIllegalArgumentException(
522              "CreateParent option is set multiple times");
523        }
524        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
525      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
527            iOpt.getClass().getName());
528      }
529    }
530    if (permission == null) {
531      throw new HadoopIllegalArgumentException("no permission supplied");
532    }
533
534
535    FsServerDefaults ssDef = getServerDefaults();
536    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
537      throw new IOException("Internal error: default blockSize is" + 
538          " not a multiple of default bytesPerChecksum ");
539    }
540    
541    if (blockSize == -1) {
542      blockSize = ssDef.getBlockSize();
543    }
544
545    // Create a checksum option honoring user input as much as possible.
546    // If bytesPerChecksum is specified, it will override the one set in
547    // checksumOpt. Any missing value will be filled in using the default.
548    ChecksumOpt defaultOpt = new ChecksumOpt(
549        ssDef.getChecksumType(),
550        ssDef.getBytesPerChecksum());
551    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
552        checksumOpt, bytesPerChecksum);
553
554    if (bufferSize == -1) {
555      bufferSize = ssDef.getFileBufferSize();
556    }
557    if (replication == -1) {
558      replication = ssDef.getReplication();
559    }
560    if (createParent == null) {
561      createParent = false;
562    }
563
564    if (blockSize % bytesPerChecksum != 0) {
565      throw new HadoopIllegalArgumentException(
566             "blockSize should be a multiple of checksumsize");
567    }
568
569    return this.createInternal(f, createFlag, permission, bufferSize,
570      replication, blockSize, progress, checksumOpt, createParent);
571  }
572
573  /**
574   * The specification of this method matches that of
575   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
576   * have been declared explicitly.
577   */
578  public abstract FSDataOutputStream createInternal(Path f,
579      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
580      int bufferSize, short replication, long blockSize, Progressable progress,
581      ChecksumOpt checksumOpt, boolean createParent)
582      throws AccessControlException, FileAlreadyExistsException,
583      FileNotFoundException, ParentNotDirectoryException,
584      UnsupportedFileSystemException, UnresolvedLinkException, IOException;
585
586  /**
587   * The specification of this method matches that of
588   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
589   * f must be fully qualified and the permission is absolute (i.e. 
590   * umask has been applied).
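   * <p>
   * An illustrative call, where {@code fs} is a concrete instance and the
   * permission is already absolute (no umask is applied here):
   * <pre>{@code
   * // createParent == true: also create missing parent directories
   * fs.mkdir(fs.makeQualified(new Path("/tmp/newdir")),
   *     FsPermission.getDefault(), true);
   * }</pre>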
591   */
592  public abstract void mkdir(final Path dir, final FsPermission permission,
593      final boolean createParent) throws AccessControlException,
594      FileAlreadyExistsException, FileNotFoundException,
595      UnresolvedLinkException, IOException;
596
597  /**
598   * The specification of this method matches that of
599   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
600   * this file system.
601   */
602  public abstract boolean delete(final Path f, final boolean recursive)
603      throws AccessControlException, FileNotFoundException,
604      UnresolvedLinkException, IOException;
605
606  /**
607   * The specification of this method matches that of
608   * {@link FileContext#open(Path)} except that Path f must be for this
609   * file system.
610   */
611  public FSDataInputStream open(final Path f) throws AccessControlException,
612      FileNotFoundException, UnresolvedLinkException, IOException {
613    return open(f, getServerDefaults().getFileBufferSize());
614  }
615
616  /**
617   * The specification of this method matches that of
618   * {@link FileContext#open(Path, int)} except that Path f must be for this
619   * file system.
620   */
621  public abstract FSDataInputStream open(final Path f, int bufferSize)
622      throws AccessControlException, FileNotFoundException,
623      UnresolvedLinkException, IOException;
624
625  /**
626   * The specification of this method matches that of
627   * {@link FileContext#setReplication(Path, short)} except that Path f must be
628   * for this file system.
629   */
630  public abstract boolean setReplication(final Path f,
631      final short replication) throws AccessControlException,
632      FileNotFoundException, UnresolvedLinkException, IOException;
633
634  /**
635   * The specification of this method matches that of
636   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
637   * f must be for this file system.
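   * <p>
   * Illustrative calls, where {@code src} and {@code dst} are paths on this
   * file system:
   * <pre>{@code
   * // Fails with FileAlreadyExistsException if dst already exists:
   * fs.rename(src, dst);
   * // Replaces dst if it is an existing file or empty directory:
   * fs.rename(src, dst, Options.Rename.OVERWRITE);
   * }</pre>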
638   */
639  public final void rename(final Path src, final Path dst,
640      final Options.Rename... options) throws AccessControlException,
641      FileAlreadyExistsException, FileNotFoundException,
642      ParentNotDirectoryException, UnresolvedLinkException, IOException {
643    boolean overwrite = false;
644    if (null != options) {
645      for (Rename option : options) {
646        if (option == Rename.OVERWRITE) {
647          overwrite = true;
648        }
649      }
650    }
651    renameInternal(src, dst, overwrite);
652  }
653  
654  /**
655   * The specification of this method matches that of
656   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
657   * f must be for this file system and NO OVERWRITE is performed.
658   * 
   * File systems that do not have a built-in overwrite need to implement
   * only this method and can take advantage of the default implementation
   * of the other {@link #renameInternal(Path, Path, boolean)}.
662   */
663  public abstract void renameInternal(final Path src, final Path dst)
664      throws AccessControlException, FileAlreadyExistsException,
665      FileNotFoundException, ParentNotDirectoryException,
666      UnresolvedLinkException, IOException;
667  
668  /**
669   * The specification of this method matches that of
670   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
671   * f must be for this file system.
672   */
673  public void renameInternal(final Path src, final Path dst,
674      boolean overwrite) throws AccessControlException,
675      FileAlreadyExistsException, FileNotFoundException,
676      ParentNotDirectoryException, UnresolvedLinkException, IOException {
677    // Default implementation deals with overwrite in a non-atomic way
678    final FileStatus srcStatus = getFileLinkStatus(src);
679
680    FileStatus dstStatus;
681    try {
682      dstStatus = getFileLinkStatus(dst);
683    } catch (IOException e) {
684      dstStatus = null;
685    }
686    if (dstStatus != null) {
687      if (dst.equals(src)) {
688        throw new FileAlreadyExistsException(
689            "The source "+src+" and destination "+dst+" are the same");
690      }
691      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
692        throw new FileAlreadyExistsException(
693            "Cannot rename symlink "+src+" to its target "+dst);
694      }
695      // It's OK to rename a file to a symlink and vice versa
696      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
697        throw new IOException("Source " + src + " and destination " + dst
698            + " must both be directories");
699      }
700      if (!overwrite) {
701        throw new FileAlreadyExistsException("Rename destination " + dst
702            + " already exists.");
703      }
704      // Delete the destination that is a file or an empty directory
705      if (dstStatus.isDirectory()) {
706        RemoteIterator<FileStatus> list = listStatusIterator(dst);
707        if (list != null && list.hasNext()) {
708          throw new IOException(
709              "Rename cannot overwrite non empty destination directory " + dst);
710        }
711      }
712      delete(dst, false);
713    } else {
714      final Path parent = dst.getParent();
715      final FileStatus parentStatus = getFileStatus(parent);
716      if (parentStatus.isFile()) {
717        throw new ParentNotDirectoryException("Rename destination parent "
718            + parent + " is a file.");
719      }
720    }
721    renameInternal(src, dst);
722  }
723  
724  /**
725   * Returns true if the file system supports symlinks, false otherwise.
726   */
727  public boolean supportsSymlinks() {
728    return false;
729  }
730  
731  /**
732   * The specification of this method matches that of  
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
734   */
735  public void createSymlink(final Path target, final Path link,
736      final boolean createParent) throws IOException, UnresolvedLinkException {
737    throw new IOException("File system does not support symlinks");    
738  }
739
740  /**
741   * The specification of this method matches that of  
   * {@link FileContext#getLinkTarget(Path)}.
743   */
744  public Path getLinkTarget(final Path f) throws IOException {
745    /* We should never get here. Any file system that threw an
746     * UnresolvedLinkException, causing this function to be called,
747     * needs to override this method.
748     */
749    throw new AssertionError();
750  }
751    
752  /**
753   * The specification of this method matches that of
754   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
755   * must be for this file system.
756   */
757  public abstract void setPermission(final Path f,
758      final FsPermission permission) throws AccessControlException,
759      FileNotFoundException, UnresolvedLinkException, IOException;
760
761  /**
762   * The specification of this method matches that of
763   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
764   * be for this file system.
765   */
766  public abstract void setOwner(final Path f, final String username,
767      final String groupname) throws AccessControlException,
768      FileNotFoundException, UnresolvedLinkException, IOException;
769
770  /**
771   * The specification of this method matches that of
772   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
773   * for this file system.
774   */
775  public abstract void setTimes(final Path f, final long mtime,
776    final long atime) throws AccessControlException, FileNotFoundException,
777      UnresolvedLinkException, IOException;
778
779  /**
780   * The specification of this method matches that of
781   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
782   * this file system.
783   */
784  public abstract FileChecksum getFileChecksum(final Path f)
785      throws AccessControlException, FileNotFoundException,
786      UnresolvedLinkException, IOException;
787  
788  /**
789   * The specification of this method matches that of
790   * {@link FileContext#getFileStatus(Path)} 
791   * except that an UnresolvedLinkException may be thrown if a symlink is 
792   * encountered in the path.
793   */
794  public abstract FileStatus getFileStatus(final Path f)
795      throws AccessControlException, FileNotFoundException,
796      UnresolvedLinkException, IOException;
797
798  /**
799   * The specification of this method matches that of
800   * {@link FileContext#getFileLinkStatus(Path)}
801   * except that an UnresolvedLinkException may be thrown if a symlink is  
802   * encountered in the path leading up to the final path component.
803   * If the file system does not support symlinks then the behavior is
804   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
805   */
806  public FileStatus getFileLinkStatus(final Path f)
807      throws AccessControlException, FileNotFoundException,
808      UnsupportedFileSystemException, IOException {
809    return getFileStatus(f);
810  }
811
812  /**
813   * The specification of this method matches that of
814   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
815   * Path f must be for this file system.
816   */
817  public abstract BlockLocation[] getFileBlockLocations(final Path f,
818      final long start, final long len) throws AccessControlException,
819      FileNotFoundException, UnresolvedLinkException, IOException;
820
821  /**
822   * The specification of this method matches that of
823   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
824   * file system.
825   */
826  public FsStatus getFsStatus(final Path f) throws AccessControlException,
827      FileNotFoundException, UnresolvedLinkException, IOException {
828    // default impl gets FsStatus of root
829    return getFsStatus();
830  }
831  
832  /**
833   * The specification of this method matches that of
834   * {@link FileContext#getFsStatus(Path)}.
835   */
836  public abstract FsStatus getFsStatus() throws AccessControlException,
837      FileNotFoundException, IOException;
838
839  /**
840   * The specification of this method matches that of
841   * {@link FileContext#listStatus(Path)} except that Path f must be for this
842   * file system.
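   * <p>
   * An illustrative traversal, where {@code fs} is a concrete instance and
   * {@code dir} is a directory on it:
   * <pre>{@code
   * RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
   * while (it.hasNext()) {
   *   FileStatus stat = it.next();
   *   System.out.println(stat.getPath());
   * }
   * }</pre>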
843   */
844  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
845      throws AccessControlException, FileNotFoundException,
846      UnresolvedLinkException, IOException {
847    return new RemoteIterator<FileStatus>() {
848      private int i = 0;
849      private FileStatus[] statusList = listStatus(f);
850      
851      @Override
852      public boolean hasNext() {
853        return i < statusList.length;
854      }
855      
856      @Override
857      public FileStatus next() {
858        if (!hasNext()) {
859          throw new NoSuchElementException();
860        }
861        return statusList[i++];
862      }
863    };
864  }
865
866  /**
867   * The specification of this method matches that of
868   * {@link FileContext#listLocatedStatus(Path)} except that Path f 
869   * must be for this file system.
870   */
871  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
872      throws AccessControlException, FileNotFoundException,
873      UnresolvedLinkException, IOException {
874    return new RemoteIterator<LocatedFileStatus>() {
875      private RemoteIterator<FileStatus> itor = listStatusIterator(f);
876      
877      @Override
878      public boolean hasNext() throws IOException {
879        return itor.hasNext();
880      }
881      
882      @Override
883      public LocatedFileStatus next() throws IOException {
884        if (!hasNext()) {
885          throw new NoSuchElementException("No more entry in " + f);
886        }
887        FileStatus result = itor.next();
888        BlockLocation[] locs = null;
889        if (result.isFile()) {
890          locs = getFileBlockLocations(
891              result.getPath(), 0, result.getLen());
892        }
893        return new LocatedFileStatus(result, locs);
894      }
895    };
896  }
897
898  /**
899   * The specification of this method matches that of
900   * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
901   * for this file system.
902   */
903  public abstract FileStatus[] listStatus(final Path f)
904      throws AccessControlException, FileNotFoundException,
905      UnresolvedLinkException, IOException;
906
  /**
   * List corrupt file blocks.
   *
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException
   */
912  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
913    throws IOException {
914    throw new UnsupportedOperationException(getClass().getCanonicalName() +
915                                            " does not support" +
916                                            " listCorruptFileBlocks");
917  }
918
919  /**
920   * The specification of this method matches that of
921   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
922   * must be for this file system.
923   */
924  public abstract void setVerifyChecksum(final boolean verifyChecksum)
925      throws AccessControlException, IOException;
926  
927  /**
928   * Get a canonical name for this file system.
929   * @return a URI string that uniquely identifies this file system
930   */
931  public String getCanonicalServiceName() {
932    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
933  }
934  
935  /**
   * Get one or more delegation tokens associated with the file system.
   * Normally a file system returns a single delegation token. A file system
   * that manages multiple file systems underneath could return a set of
   * delegation tokens for all the file systems it manages.
   * 
   * @param renewer the account name that is allowed to renew the token.
   * @return a list of delegation tokens.
   *   If delegation tokens are not supported, return a list of size zero.
   * @throws IOException
945   */
946  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
947  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
948    return new ArrayList<Token<?>>(0);
949  }
950  
951  @Override //Object
952  public int hashCode() {
953    return myUri.hashCode();
954  }
955  
956  @Override //Object
957  public boolean equals(Object other) {
    if (!(other instanceof AbstractFileSystem)) {
959      return false;
960    }
961    return myUri.equals(((AbstractFileSystem) other).myUri);
962  }
963}