hadoop-common/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs

  • /mount/Mountd.java

 super(new RpcProgramMountd(config, registrationSocket));

>> The RpcProgram is passed to the MountdBase constructor
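For context, the enclosing Mountd class is only a thin wrapper around MountdBase. A minimal sketch of what it looks like in this version (reconstructed for illustration, not copied verbatim from the source tree):

public class Mountd extends MountdBase {

  public Mountd(Configuration config, DatagramSocket registrationSocket)
      throws IOException {
    // hand the RPC program implementation to MountdBase
    super(new RpcProgramMountd(config, registrationSocket));
  }

  public static void main(String[] args) throws IOException {
    // null registrationSocket: let the program register with the local portmapper on its own
    Mountd mountd = new Mountd(new Configuration(), null);
    mountd.start(true);
  }
}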

↓↓

public RpcProgramMountd(Configuration config,
    DatagramSocket registrationSocket) throws IOException {
    // Note that RPC cache is not enabled

    /*
     * protected RpcProgram(String program, String host, int port,
     *     int progNumber, int lowProgVersion, int highProgVersion,
     *     DatagramSocket registrationSocket)
     *
     * The constructor in RpcProgram.java assigns these parameters to the
     * RpcProgram object:
     *
     * @param program program name
     * @param host host where the RPC server program is started
     * @param port port where the RPC server program is listening to
     * @param progNumber program number as defined in RFC 1050
     * @param lowProgVersion lowest version of the specification supported
     * @param highProgVersion highest version of the specification supported
     */
    super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
        PROGRAM, VERSION_1, VERSION_3, registrationSocket);

    // exports are the share targets, i.e. the paths clients are allowed to mount
    exports = new ArrayList<String>();
    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
        Nfs3Constant.EXPORT_POINT_DEFAULT));
    this.hostsMatcher = NfsExports.getInstance(config);

    // set up the list of mount entries
    this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
    UserGroupInformation.setConfiguration(config);
    SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY,
            DFS_NFS_KERBEROS_PRINCIPAL_KEY);

    // set up the DFSClient that talks to the NameNode
    this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
  }
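Both the mountd port and the export point come from the Configuration object. A hedged usage sketch (key names taken from the constructor above, the port and path values are invented for illustration):

Configuration config = new Configuration();
config.setInt("nfs3.mountd.port", 4272);                // listen on a non-default port
config.set(Nfs3Constant.EXPORT_POINT, "/data/shared");  // HDFS directory to export (illustrative value)
// null registrationSocket, as in the default startup path
RpcProgramMountd program = new RpcProgramMountd(config, null);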

 

 mountd.start(true); // start the UDP and TCP servers
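start(true) is implemented in MountdBase. Roughly (a paraphrase of its shape, not the exact source), it starts both transports and then registers the MOUNT program with the local portmapper:

// MountdBase.start(boolean register) -- approximate shape
public void start(boolean register) {
  startUDPServer();   // serve MOUNT requests over UDP
  startTCPServer();   // serve MOUNT requests over TCP
  if (register) {
    // tell the local portmapper (rpcbind) which ports mountd is bound to,
    // so NFS clients can discover them
    rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
    rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
  }
}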


