Initializing or restoring a node

The following steps should be taken during application initialization to initialize the high-availability services:

  1. Install any required partition mappers. This must be done before any partitioned objects are created.

  2. Wait for all nodes to be active that are required for the partition definitions on the local node.

  3. Define and enable all partitions that should be known on the local node. This includes partitions for which the local node is the active or a replica node. It also includes any sparse partitions.

  4. Enable application work in a separate transaction from the above steps.

Other than the installation of the partition mappers, all of these steps can be performed by the administrative tools, assuming that application work can be started through an administrative action — either an explicit command (for example, starting a channel) or loading a configuration. The snippet in Example 7.6, “Node initialization” shows the pattern to be used for node initialization.

Restoring a node to service following a failure is identical to normal node initialization with the only possible exception being the EnableAction used when enabling partitions. During normal node initialization an EnableAction of JOIN_CLUSTER can be used because there are no objects in shared memory. However, in the case where a node is being restored, and shared memory contains objects that should be removed before re-joining the cluster, an EnableAction of JOIN_CLUSTER_PURGE should be used to remove the stale objects from shared memory.

Example 7.6. Node initialization

//     $Revision: 1.1.2.3 $
package com.kabira.snippets.highavailability;

import com.kabira.platform.Transaction;
import com.kabira.platform.annotation.Managed;
import com.kabira.platform.highavailability.Partition;
import com.kabira.platform.highavailability.PartitionManager;
import com.kabira.platform.highavailability.PartitionManager.EnableAction;
import com.kabira.platform.highavailability.PartitionMapper;
import com.kabira.platform.highavailability.PartitionMapper.Properties.Audit;
import com.kabira.platform.highavailability.ReplicaNode;
import com.kabira.platform.highavailability.ReplicaNode.ReplicationType;
import com.kabira.platform.property.Status;

/**
 * Initialize a node for high-availability.
 * <p>
 * Demonstrates the required initialization order: install partition
 * mappers, wait for all required nodes, define and enable partitions,
 * and finally enable application work in a separate transaction.
 * <h2> Target Nodes</h2>
 * <ul>
 * <li> <b>domainname</b> = Development</li>
 * </ul>
 */
public class LifeCycle
{
    /**
     * Managed object representing application work. Instances are
     * partitioned via the installed {@link Mapper}.
     */
    @Managed
    private static class X
    {
        X(final String node)
        {
            m_node = node;
        }
        private final String m_node;
    }

    /**
     * Partition mapper that assigns every {@link X} instance to the
     * single partition used by this snippet.
     */
    private static class Mapper extends PartitionMapper
    {
        @Override
        public String getPartition(Object instance)
        {
            // Only X instances are expected to be mapped here.
            assert instance instanceof X : instance;
            return _PARTITION_NAME;
        }
    }

    /**
     * Node initialization entry point.
     * <p>
     * Performs the documented initialization steps in order. Each step
     * runs in its own transaction; in particular, application work is
     * enabled in a separate transaction from the partition setup.
     *
     * @param args not used
     */
    public static void main(final String[] args)
    {
        //
        //    Step 1: install partition mappers. This must happen
        //    before any partitioned objects are created.
        //
        installMapper();

        //
        //    Step 2: wait for all required nodes to come active
        //
        for (String node : _NODE_LIST)
        {
            waitForActive(node);
        }

        //
        //    Step 3: define and enable the partitions known on this node
        //
        activatePartition();

        //
        //    Step 4: enable application work in a separate transaction
        //
        enableApplicationWork();
    }

    /**
     * Install the partition mapper for {@link X} with partitioning
     * auditing enabled, so unpartitioned instances are detected.
     */
    private static void installMapper()
    {
        new Transaction()
        {
            @Override
            protected void run()
            {
                //
                //    Verify that there are no unpartitioned instances
                //
                PartitionMapper.Properties properties
                    = new PartitionMapper.Properties(Audit.VERIFY_PARTIONING);
                PartitionManager.setMapper(X.class, new Mapper(), properties);
            }
        }.execute();
    }

    /**
     * Block until the named node is active in the cluster.
     *
     * @param node name of the node to wait for
     */
    private static void waitForActive(final String node)
    {
        new Transaction()
        {
            @Override
            protected void run()
            {
                PartitionManager.waitForNode(node);
            }
        }.execute();
    }

    /**
     * Define the snippet partition with node A active and nodes B and C
     * as synchronous replicas, then enable it.
     */
    private static void activatePartition()
    {
        new Transaction()
        {
            @Override
            protected void run() throws Transaction.Rollback
            {
                final ReplicaNode[] replicas =
                {
                    new ReplicaNode("B", ReplicationType.SYNCHRONOUS),
                    new ReplicaNode("C", ReplicationType.SYNCHRONOUS)
                };

                PartitionManager.definePartition(
                    _PARTITION_NAME,
                    null,
                    "A",
                    replicas);
                Partition partition = PartitionManager.getPartition(_PARTITION_NAME);

                //
                //    JOIN_CLUSTER is appropriate for normal initialization
                //    (no objects in shared memory). When restoring a node
                //    with stale objects in shared memory, use
                //    JOIN_CLUSTER_PURGE instead.
                //
                partition.enable(EnableAction.JOIN_CLUSTER);
            }
        }.execute();
    }

    /**
     * Enable application work by creating a partitioned object. Runs in
     * its own transaction, separate from the initialization steps.
     */
    private static void enableApplicationWork()
    {
        new Transaction()
        {
            @Override
            protected void run() throws Transaction.Rollback
            {
                new X(System.getProperty(Status.NODE_NAME));
            }
        }.execute();
    }

    //    Single partition used by this snippet, named after the class.
    private static final String _PARTITION_NAME = LifeCycle.class.getSimpleName();

    //    All nodes that must be active before partitions are defined.
    private static final String[] _NODE_LIST =
    {
        "A", "B", "C"
    };
}