james-server-dev mailing list archives

From Felix Knecht <fel...@apache.org>
Subject Re: svn commit: r1182282 - in /james/mailbox/trunk/hbase/src/test: java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java resources/hdfs-default.xml
Date Wed, 12 Oct 2011 13:20:07 GMT
On 10/12/2011 02:56 PM, Eric Charles wrote:
> Hi,
>
> In the hadoop/hbase world, you set the conf properties by defining a
> *-site.xml, where you override the default properties defined in the
> *-default.xml shipped in the hadoop/hbase jar.
>
> So rather than having hdfs-default.xml in our source tree, an
> hdfs-site.xml with only the dfs.datanode.data.dir.perm property defined
> would be better.

Thanks for the hint. It's even written in the header comment of the file :-(
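
For reference, the resulting hdfs-site.xml would then be a minimal
sketch along these lines (keeping the 775 value from the old hard-coded
setter; everything else falls back to the hdfs-default.xml shipped in
the hadoop jar; the comment below is mine, not upstream text):

  <?xml version="1.0"?>
  <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
  <configuration>
    <property>
      <name>dfs.datanode.data.dir.perm</name>
      <value>775</value>
      <!-- Overridden so the mini-cluster tests get usable file
           permissions on the datanode directories regardless of the
           local umask. -->
    </property>
  </configuration>

That single override would replace the whole copied hdfs-default.xml
from the commit below.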

We would say "Lappi mach d Ouge uuf" (Open your eyes, dumb cluck!)
Felix

>
> Thx,
> Eric
>
> On 12/10/11 11:28, felixk@apache.org wrote:
>> Author: felixk
>> Date: Wed Oct 12 09:28:10 2011
>> New Revision: 1182282
>>
>> URL: http://svn.apache.org/viewvc?rev=1182282&view=rev
>> Log:
>> Move hard-coded configurations into a configuration file. The
>> configuration file was originally taken from
>> hadoop-core-0.20-append-r1057313.jar
>>
>> Added:
>> james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml (with props)
>> Modified:
>> james/mailbox/trunk/hbase/src/test/java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java
>>
>>
>> Modified:
>> james/mailbox/trunk/hbase/src/test/java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java
>>
>> URL: http://svn.apache.org/viewvc/james/mailbox/trunk/hbase/src/test/java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java?rev=1182282&r1=1182281&r2=1182282&view=diff
>>
>> ==============================================================================
>>
>> --- james/mailbox/trunk/hbase/src/test/java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java (original)
>> +++ james/mailbox/trunk/hbase/src/test/java/org/apache/james/mailbox/hbase/HBaseClusterSingleton.java Wed Oct 12 09:28:10 2011
>> @@ -50,16 +50,6 @@ public class HBaseClusterSingleton {
>> public HBaseClusterSingleton(boolean useMiniCluster) throws Exception {
>> if (useMiniCluster) {
>> HBaseTestingUtility htu = new HBaseTestingUtility();
>> - htu.getConfiguration().setBoolean("dfs.support.append", true);
>> -
>> - // TODO
>> - // I got wrong filepermissions on the created files. This fixes it for me.
>> - // Maybe this depends on the umask settings on the local linux box?
>> - // IMO following line should solve this, but it doesn't ..
>> - // (see http://hadoop.apache.org/hdfs/docs/r0.21.0/hdfs-default.html)
>> - //
>> - // htu.getConfiguration().set("dfs.permissions.enabled", "false");
>> - htu.getConfiguration().set("dfs.datanode.data.dir.perm", "775");
>> try {
>> hbaseCluster = htu.startMiniCluster();
>> conf = hbaseCluster.getConfiguration();
>>
>> Added: james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml
>> URL: http://svn.apache.org/viewvc/james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml?rev=1182282&view=auto
>>
>> ==============================================================================
>>
>> --- james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml (added)
>> +++ james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml Wed Oct 12 09:28:10 2011
>> @@ -0,0 +1,370 @@
>> +<?xml version="1.0"?>
>> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
>> +
>> +<!-- Do not modify this file directly. Instead, copy entries that you -->
>> +<!-- wish to modify from this file into hdfs-site.xml and change them -->
>> +<!-- there. If hdfs-site.xml does not already exist, create it. -->
>> +
>> +<configuration>
>> +
>> +<property>
>> +<name>dfs.namenode.logging.level</name>
>> +<value>info</value>
>> +<description>The logging level for dfs namenode. Other values are
>> +"dir" (trace namespace mutations), "block" (trace block under/over
>> +replications and block creations/deletions), or "all".</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.secondary.http.address</name>
>> +<value>0.0.0.0:50090</value>
>> +<description>
>> + The secondary namenode http server address and port.
>> + If the port is 0 then the server will start on a free port.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.address</name>
>> +<value>0.0.0.0:50010</value>
>> +<description>
>> + The address where the datanode server will listen to.
>> + If the port is 0 then the server will start on a free port.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.http.address</name>
>> +<value>0.0.0.0:50075</value>
>> +<description>
>> + The datanode http server address and port.
>> + If the port is 0 then the server will start on a free port.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.ipc.address</name>
>> +<value>0.0.0.0:50020</value>
>> +<description>
>> + The datanode ipc server address and port.
>> + If the port is 0 then the server will start on a free port.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.handler.count</name>
>> +<value>3</value>
>> +<description>The number of server threads for the datanode.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.http.address</name>
>> +<value>0.0.0.0:50070</value>
>> +<description>
>> + The address and the base port where the dfs namenode web ui will listen on.
>> + If the port is 0 then the server will start on a free port.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.https.enable</name>
>> +<value>false</value>
>> +<description>Decide if HTTPS(SSL) is supported on HDFS
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.https.need.client.auth</name>
>> +<value>false</value>
>> +<description>Whether SSL client certificate authentication is required
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.https.server.keystore.resource</name>
>> +<value>ssl-server.xml</value>
>> +<description>Resource file from which ssl server keystore
>> + information will be extracted
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.https.client.keystore.resource</name>
>> +<value>ssl-client.xml</value>
>> +<description>Resource file from which ssl client keystore
>> + information will be extracted
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.https.address</name>
>> +<value>0.0.0.0:50475</value>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.https.address</name>
>> +<value>0.0.0.0:50470</value>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.dns.interface</name>
>> +<value>default</value>
>> +<description>The name of the Network Interface from which a data node
>> + should report its IP address.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.dns.nameserver</name>
>> +<value>default</value>
>> +<description>The host name or IP address of the name server (DNS)
>> + which a DataNode should use to determine the host name used by the
>> + NameNode for communication and display purposes.
>> +</description>
>> +</property>
>> +
>> +
>> +
>> +<property>
>> +<name>dfs.replication.considerLoad</name>
>> +<value>true</value>
>> +<description>Decide if chooseTarget considers the target's load or not
>> +</description>
>> +</property>
>> +<property>
>> +<name>dfs.default.chunk.view.size</name>
>> +<value>32768</value>
>> +<description>The number of bytes to view for a file on the browser.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.du.reserved</name>
>> +<value>0</value>
>> +<description>Reserved space in bytes per volume. Always leave this
>> + much space free for non dfs use.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.name.dir</name>
>> +<value>${hadoop.tmp.dir}/dfs/name</value>
>> +<description>Determines where on the local filesystem the DFS name node
>> + should store the name table(fsimage). If this is a comma-delimited list
>> + of directories then the name table is replicated in all of the
>> + directories, for redundancy.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.name.edits.dir</name>
>> +<value>${dfs.name.dir}</value>
>> +<description>Determines where on the local filesystem the DFS name node
>> + should store the transaction (edits) file. If this is a comma-delimited list
>> + of directories then the transaction file is replicated in all of the
>> + directories, for redundancy. Default value is same as dfs.name.dir
>> +</description>
>> +</property>
>> +<property>
>> +<name>dfs.web.ugi</name>
>> +<value>webuser,webgroup</value>
>> +<description>The user account used by the web interface.
>> + Syntax: USERNAME,GROUP1,GROUP2, ...
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.permissions</name>
>> +<value>true</value>
>> +<description>
>> + If "true", enable permission checking in HDFS.
>> + If "false", permission checking is turned off,
>> + but all other behavior is unchanged.
>> + Switching from one parameter value to the other does not change the mode,
>> + owner or group of files or directories.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.permissions.supergroup</name>
>> +<value>supergroup</value>
>> +<description>The name of the group of super-users.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.data.dir</name>
>> +<value>${hadoop.tmp.dir}/dfs/data</value>
>> +<description>Determines where on the local filesystem a DFS data node
>> + should store its blocks. If this is a comma-delimited
>> + list of directories, then data will be stored in all named
>> + directories, typically on different devices.
>> + Directories that do not exist are ignored.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.datanode.data.dir.perm</name>
>> +<value>775</value>
>> +<description>Permissions for the directories on the local filesystem where
>> + the DFS data node stores its blocks. The permissions can either be
>> + octal or symbolic.
>> +</description>
>> +</property>
>> +<property>
>> +<name>dfs.replication</name>
>> +<value>3</value>
>> +<description>Default block replication.
>> + The actual number of replications can be specified when the file is
>> + created. The default is used if replication is not specified at create time.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.replication.max</name>
>> +<value>512</value>
>> +<description>Maximal block replication.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.replication.min</name>
>> +<value>1</value>
>> +<description>Minimal block replication.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.block.size</name>
>> +<value>67108864</value>
>> +<description>The default block size for new files.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.df.interval</name>
>> +<value>60000</value>
>> +<description>Disk usage statistics refresh interval in msec.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.client.block.write.retries</name>
>> +<value>3</value>
>> +<description>The number of retries for writing blocks to the data nodes,
>> + before we signal failure to the application.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.blockreport.intervalMsec</name>
>> +<value>3600000</value>
>> +<description>Determines block reporting interval in milliseconds.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.blockreport.initialDelay</name>
>> +<value>0</value>
>> +<description>Delay for first block report in seconds.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.heartbeat.interval</name>
>> +<value>3</value>
>> +<description>Determines datanode heartbeat interval in seconds.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.namenode.handler.count</name>
>> +<value>10</value>
>> +<description>The number of server threads for the namenode.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.safemode.threshold.pct</name>
>> +<value>0.999f</value>
>> +<description>
>> + Specifies the percentage of blocks that should satisfy
>> + the minimal replication requirement defined by dfs.replication.min.
>> + Values less than or equal to 0 mean not to start in safe mode.
>> + Values greater than 1 will make safe mode permanent.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.safemode.extension</name>
>> +<value>30000</value>
>> +<description>
>> + Determines extension of safe mode in milliseconds
>> + after the threshold level is reached.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.balance.bandwidthPerSec</name>
>> +<value>1048576</value>
>> +<description>
>> + Specifies the maximum amount of bandwidth that each datanode
>> + can utilize for balancing purposes, in terms of
>> + the number of bytes per second.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.hosts</name>
>> +<value></value>
>> +<description>Names a file that contains a list of hosts that are
>> + permitted to connect to the namenode. The full pathname of the file
>> + must be specified. If the value is empty, all hosts are
>> + permitted.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.hosts.exclude</name>
>> +<value></value>
>> +<description>Names a file that contains a list of hosts that are
>> + not permitted to connect to the namenode. The full pathname of the
>> + file must be specified. If the value is empty, no hosts are
>> + excluded.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.max.objects</name>
>> +<value>0</value>
>> +<description>The maximum number of files, directories and blocks
>> + dfs supports. A value of zero indicates no limit to the number
>> + of objects that dfs supports.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.namenode.decommission.interval</name>
>> +<value>30</value>
>> +<description>Namenode periodicity in seconds to check if decommission is
>> + complete.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.namenode.decommission.nodes.per.interval</name>
>> +<value>5</value>
>> +<description>The number of nodes namenode checks if decommission is complete
>> + in each dfs.namenode.decommission.interval.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.replication.interval</name>
>> +<value>3</value>
>> +<description>The periodicity in seconds with which the namenode computes
>> + replication work for datanodes.</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.access.time.precision</name>
>> +<value>3600000</value>
>> +<description>The access time for an HDFS file is precise up to this value.
>> + The default value is 1 hour. Setting a value of 0 disables
>> + access times for HDFS.
>> +</description>
>> +</property>
>> +
>> +<property>
>> +<name>dfs.support.append</name>
>> +<value>true</value>
>> +<description>This branch of HDFS supports reliable append/sync.
>> +</description>
>> +</property>
>> +
>> +</configuration>
>>
>> Propchange: james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml
>> ------------------------------------------------------------------------------
>>
>> svn:eol-style = native
>>
>> Propchange: james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml
>> ------------------------------------------------------------------------------
>>
>> svn:keywords = Author Date Id Revision
>>
>>
>>
>


---------------------------------------------------------------------
To unsubscribe, e-mail: server-dev-unsubscribe@james.apache.org
For additional commands, e-mail: server-dev-help@james.apache.org

