2011年4月29日 星期五

Show Transaction status for log file.

在查Spring Transaction 的時候,看到一個例子,
在Log日誌檔裡的每行加上Transaction 的狀態
[+] or [-] ,
主要是透過Reflect 的機制去取TransactionSynchronizationManager class
再取得isActualTransactionActive 屬性,來決定Transaction的狀態,
這邊log4j要使用xml格式,不能使用properties檔


TransactionIndicatingFilter.java
import static com.gfactor.emaildiscovery.utils.TransactionIndicatingUtil.getTransactionStatus;

import org.apache.log4j.spi.Filter;
import org.apache.log4j.spi.LoggingEvent;

/**
 * Log4j filter that never filters anything out; it only enriches every
 * {@link LoggingEvent} with the current Spring transaction state so the
 * layout can render it via %X{xaName} / %X{xaStatus}.
 */
public class TransactionIndicatingFilter extends Filter {

    @Override
    public int decide(LoggingEvent event) {
        // Verbose form carries the transaction name, terse form a short marker.
        event.setProperty("xaName", getTransactionStatus(true));
        event.setProperty("xaStatus", getTransactionStatus(false));
        // NEUTRAL: leave the accept/deny decision to the rest of the filter chain.
        return NEUTRAL;
    }
}

TransactionIndicatingUtil.java
package com.gfactor.emaildiscovery.utils;

/**
 * Reflectively queries Spring's TransactionSynchronizationManager so the
 * transaction status can be logged without a hard compile-time dependency on
 * Spring. If Spring (or a context class loader) is unavailable, a fallback
 * marker string is returned instead of throwing.
 */
public class TransactionIndicatingUtil {

    /** Looked up reflectively so Spring remains an optional runtime dependency. */
    private static final String TSM_CLASSNAME =
            "org.springframework.transaction.support.TransactionSynchronizationManager";

    /**
     * Returns a short marker describing the current transaction state.
     *
     * @param verbose when {@code true}, returns "[&lt;txName&gt;] " or "[no transaction] "
     *                (or an error marker); when {@code false}, returns "[ ] " for an
     *                active transaction, "[-] " otherwise (or "[x ]" on error)
     * @return a non-null marker string, always ready to embed in a log line
     */
    public static String getTransactionStatus(boolean verbose) {
        try {
            ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
            if (contextClassLoader == null) {
                return verbose ? "[ccl unavailable] " : "[x ]";
            }

            Class<?> tsmClass = contextClassLoader.loadClass(TSM_CLASSNAME);
            // Static method, so invoke with a null receiver.
            Boolean isActive =
                    (Boolean) tsmClass.getMethod("isActualTransactionActive").invoke(null);

            if (!verbose) {
                return isActive ? "[ ] " : "[-] ";
            }

            String transactionName =
                    (String) tsmClass.getMethod("getCurrentTransactionName").invoke(null);
            // Fixed: the original had lost its '+' operators here and did not compile.
            return isActive ? "[" + transactionName + "] " : "[no transaction] ";
        } catch (Exception e) {
            // Spring not on the classpath (or reflection failed) — degrade gracefully.
            return verbose ? "[spring unavailable] " : "[x ]";
        }
    }
}


log4j.xml 在appender裡面加上filter class, param name加上[%X{xaStatus}]
<appender name="stdout" class="org.apache.log4j.ConsoleAppender">
  <layout class="org.apache.log4j.PatternLayout">
   <param name="ConversionPattern" value="%d{HH:mm:ss} [%-5p] %F:%L - %m%n  Ts-[%X{xaStatus}] ," />
  </layout>
  <filter class="com.gfactor.emaildiscovery.utils.TransactionIndicatingFilter" />
 </appender>

OSGi - Using ApacheDS , embedded LDAP Server for OSGi Bundle

前陣子在實作SpringDM上的JAAS,順便弄了個LDAP Server, 把它發佈到OSGi上成為一個Bundle。
首先要把apacheds-all-1.5.7.jar 抓下來

建立一個Class, MyActavitor.java
package com.embed.ldap.service;
/**
 * Spring-managed lifecycle bean (wired via init-method="start" /
 * destroy-method="stop" in the bundle context XML) that starts and stops the
 * embedded ApacheDS LDAP server when the OSGi bundle comes up and goes down.
 */
public class MyActavitor {
 // Injected via the <property name="embeddedAds"> setter in the context XML.
 private EmbeddedADSVer157 embeddedAds;
  
 /** Setter injection target for the embedded ApacheDS wrapper bean. */
 public void setEmbeddedAds(EmbeddedADSVer157 embeddedAds) {
  this.embeddedAds = embeddedAds;
 }

 /** Bundle init callback: opens the embedded LDAP server's listener. */
 public void start() throws Exception {
  System.out.println("Hello Spring OSGi World!! ");
  embeddedAds.startServer();
 }

 /** Bundle destroy callback: shuts the embedded LDAP server down. */
 public void stop() throws Exception {
  System.out.println("Goodbye Spring OSGi World!!");
  embeddedAds.stopServer();
 }
 
 
  /*

    public static void main( String[] args ) 
    {
        try
        {
            File workDir = new File( System.getProperty( "java.io.tmpdir" )   "/server-work" );
            workDir.mkdirs();
            
            // Create the server
            EmbeddedADSVer157 ads = new EmbeddedADSVer157( workDir );

            // Read an entry
            Entry result = ads.service.getAdminSession().lookup( new DN( "dc=apache,dc=org" ) );

            // And print it if available
            System.out.println( "Found entry : "   result );
            
            // optionally we can start a server too
            ads.startServer();
            
            System.out.println("for test ");
        }
        catch ( Exception e )
        {
            // Ok, we have something wrong going on ...
            e.printStackTrace();
        }
    }*/
}

這個Example 從ApacheDS官網上給的,透過startServer()可以將LDAP Server打開
EmbeddedADSVer157.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.embed.ldap.service;

import java.io.File;
import java.util.HashSet;
import java.util.List;

import org.apache.directory.server.constants.ServerDNConstants;
import org.apache.directory.server.core.DefaultDirectoryService;
import org.apache.directory.server.core.DirectoryService;
import org.apache.directory.server.core.partition.Partition;
import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
import org.apache.directory.server.core.partition.ldif.LdifPartition;
import org.apache.directory.server.core.schema.SchemaPartition;
import org.apache.directory.server.ldap.LdapServer;
import org.apache.directory.server.protocol.shared.transport.TcpTransport;
import org.apache.directory.server.xdbm.Index;
import org.apache.directory.shared.ldap.entry.Entry;
import org.apache.directory.shared.ldap.entry.ServerEntry;
import org.apache.directory.shared.ldap.exception.LdapException;
import org.apache.directory.shared.ldap.name.DN;
import org.apache.directory.shared.ldap.schema.SchemaManager;
import org.apache.directory.shared.ldap.schema.ldif.extractor.SchemaLdifExtractor;
import org.apache.directory.shared.ldap.schema.ldif.extractor.impl.DefaultSchemaLdifExtractor;
import org.apache.directory.shared.ldap.schema.loader.ldif.LdifSchemaLoader;
import org.apache.directory.shared.ldap.schema.manager.impl.DefaultSchemaManager;
import org.apache.directory.shared.ldap.schema.registries.SchemaLoader;
import org.springframework.beans.factory.annotation.Configurable;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;

import com.embed.ldap.service.internal.ContextResourceLoader;

/**
 * A simple example exposing how to embed Apache Directory Server version 1.5.7
 * into an application.
 * 
 * @author <a href="mailto:dev@directory.apache.org">Apache Directory
 *         Project</a>
 * @version $Rev$, $Date$ 
 */
@Configurable(preConstruction=true)
public class EmbeddedADSVer157 {
 
 /** The directory service */
 public DirectoryService service;

 /** The LDAP server */
 private LdapServer server;
 
 
 private ContextResourceLoader ctxRsLoader;
 /**
  * Add a new partition to the server
  * 
  * @param partitionId
  *            The partition Id
  * @param partitionDn
  *            The partition DN
  * @return The newly added partition
  * @throws Exception
  *             If the partition can't be added
  */
 private Partition addPartition(String partitionId, String partitionDn)
   throws Exception {
  // Create a new partition named 'foo'.
  JdbmPartition partition = new JdbmPartition();
  partition.setId(partitionId);
  partition.setPartitionDir(new File(service.getWorkingDirectory(),
    partitionId));
  partition.setSuffix(partitionDn);
  service.addPartition(partition);

  return partition;
 }

 /**
  * Add a new set of index on the given attributes
  * 
  * @param partition
  *            The partition on which we want to add index
  * @param attrs
  *            The list of attributes to index
  */
 private void addIndex(Partition partition, String... attrs) {
  // Index some attributes on the apache partition
  HashSet<Index<?, ServerEntry, Long>> indexedAttributes = new HashSet<Index<?, ServerEntry, Long>>();

  for (String attribute : attrs) {
   indexedAttributes
     .add(new JdbmIndex<String, ServerEntry>(attribute));
  }

  ((JdbmPartition) partition).setIndexedAttributes(indexedAttributes);
 }

 /**
  * initialize the schema manager and add the schema partition to diectory
  * service
  * 
  * @throws Exception
  *             if the schema LDIF files are not found on the classpath
  */
 private void initSchemaPartition() throws Exception {
  SchemaPartition schemaPartition = service.getSchemaService().getSchemaPartition();

  // Init the LdifPartition
  LdifPartition ldifPartition = new LdifPartition();
  String workingDirectory = service.getWorkingDirectory().getAbsolutePath();
  ldifPartition.setWorkingDirectory(workingDirectory   "/schema");
  System.out.println("initSchemaPartition workingDirectory ->>" workingDirectory);
  
  // Extract the schema on disk (a brand new one) and load the registries
  File schemaRepository = new File(workingDirectory, "schema");
  System.out.println("133 - " schemaRepository.getAbsolutePath());

  SchemaLdifExtractor extractor = new MyDefaultSchemaLdifExtractor(new File(workingDirectory));
                                      
  System.out.println("136 - " extractor); 
  
  extractor.extractOrCopy(true);
  System.out.println("138 - extractor.extractOrCopy"); 
  
  schemaPartition.setWrappedPartition(ldifPartition);
  System.out.println("141 - setWrappedPartition"); 
  SchemaLoader loader = new LdifSchemaLoader(schemaRepository);
  System.out.println("142 -loader " loader);
  SchemaManager schemaManager = new DefaultSchemaManager(loader);
  service.setSchemaManager(schemaManager);
   
  // We have to load the schema now, otherwise we won't be able
  // to initialize the Partitions, as we won't be able to parse
  // and normalize their suffix DN
  schemaManager.loadAllEnabled();
  System.out.println("150");
  schemaPartition.setSchemaManager(schemaManager);

  List<Throwable> errors = schemaManager.getErrors();

  if (errors.size() != 0) {
   throw new Exception("Schema load failed : "   errors);
  }
 }

 /**
  * Initialize the server. It creates the partition, adds the index, and
  * injects the context entries for the created partitions.
  * 
  * @param workDir
  *            the directory to be used for storing the data
  * @throws Exception
  *             if there were some problems while initializing the system
  */
 private void initDirectoryService(File workDir) throws Exception {
  System.out.println("initDirectoryService workdir =" workDir.getAbsolutePath());
  // Initialize the LDAP service
  service = new DefaultDirectoryService();
  service.setWorkingDirectory(workDir);

  // first load the schema
  initSchemaPartition();

  // then the system partition
  // this is a MANDATORY partition
  Partition systemPartition = addPartition("system",ServerDNConstants.SYSTEM_DN);
  service.setSystemPartition(systemPartition);

  // Disable the ChangeLog system
  service.getChangeLog().setEnabled(false);
  service.setDenormalizeOpAttrsEnabled(true);

  // Now we can create as many partitions as we need
  // Create some new partitions named 'foo', 'bar' and 'apache'.
  Partition fooPartition = addPartition("foo", "dc=foo,dc=com");
  Partition barPartition = addPartition("bar", "dc=bar,dc=com");
  Partition apachePartition = addPartition("apache", "dc=apache,dc=org");

  // Index some attributes on the apache partition
  addIndex(apachePartition, "objectClass", "ou", "uid");

  // And start the service
  service.startup();

  // Inject the foo root entry if it does not already exist
  try {
   service.getAdminSession().lookup(fooPartition.getSuffixDn());
  } catch (LdapException lnnfe) {
   DN dnFoo = new DN("dc=foo,dc=com");
   ServerEntry entryFoo = service.newEntry(dnFoo);
   entryFoo.add("objectClass", "top", "domain", "extensibleObject");
   entryFoo.add("dc", "foo");
   service.getAdminSession().add(entryFoo);
  }

  // Inject the bar root entry
  try {
   service.getAdminSession().lookup(barPartition.getSuffixDn());
  } catch (LdapException lnnfe) {
   DN dnBar = new DN("dc=bar,dc=com");
   ServerEntry entryBar = service.newEntry(dnBar);
   entryBar.add("objectClass", "top", "domain", "extensibleObject");
   entryBar.add("dc", "bar");
   service.getAdminSession().add(entryBar);
  }

  // Inject the apache root entry
  if (!service.getAdminSession().exists(apachePartition.getSuffixDn())) {
   DN dnApache = new DN("dc=Apache,dc=Org");
   ServerEntry entryApache = service.newEntry(dnApache);
   entryApache.add("objectClass", "top", "domain", "extensibleObject");
   entryApache.add("dc", "Apache");
   service.getAdminSession().add(entryApache);
  }

  // We are all done !
 }

 /**
  * Creates a new instance of EmbeddedADS. It initializes the directory
  * service.
  * 
  * @throws Exception
  *             If something went wrong
  */
 public EmbeddedADSVer157(String filePath,ContextResourceLoader ctxRsLoader) throws Exception {
  this.ctxRsLoader = ctxRsLoader;
  System.out.println("filePath =" filePath);    
  System.out.println("ctxRsLoader = " ctxRsLoader);
  File rs = ctxRsLoader.getResourceFile(filePath);
  File rs2 = new File("/LDAP/server-work");
  System.out.println("rs2 ="   rs2.getAbsolutePath());
  System.out.println("rs = " rs  " , " rs.getName());
  
  File workDir = new File( System.getProperty( "java.io.tmpdir" )   "/server-work" );
  System.out.println("schema.resource.location =" System.getProperty("java.class.path"));
//  File workDir = new File(filePath);
  initDirectoryService(workDir);
 }

 /**
  * starts the LdapServer
  * 
  * @throws Exception
  */
 public void startServer() throws Exception {
  Entry result = this.service.getAdminSession().lookup(new DN("dc=apache,dc=org"));
  server = new LdapServer();
  int serverPort = 10389;
  server.setTransports(new TcpTransport(serverPort));
  server.setDirectoryService(service);
  server.start();

 }

 public void stopServer() {
  server.stop();
 }


 /*
  * public static void main( String[] args ) { try { File workDir = new File(
  * System.getProperty( "java.io.tmpdir" )   "/server-work" );
  * workDir.mkdirs();
  * 
  * // Create the server EmbeddedADSVer157 ads = new EmbeddedADSVer157(
  * workDir );
  * 
  * // Read an entry Entry result = ads.service.getAdminSession().lookup( new
  * DN( "dc=apache,dc=org" ) );
  * 
  * // And print it if available System.out.println( "Found entry : "  
  * result );
  * 
  * // optionally we can start a server too ads.startServer();
  * 
  * System.out.println("for test "); } catch ( Exception e ) { // Ok, we have
  * something wrong going on ... e.printStackTrace(); } }
  */
}

建立一個MyDefaultSchemaLdifExtractor.java
package com.embed.ldap.service;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InvalidObjectException;
import java.net.URL;
import java.util.Enumeration;
import java.util.Map;
import java.util.Stack;
import java.util.UUID;
import java.util.Map.Entry;
import java.util.regex.Pattern;

import org.apache.directory.shared.i18n.I18n;
import org.apache.directory.shared.ldap.constants.SchemaConstants;
import org.apache.directory.shared.ldap.exception.LdapException;
import org.apache.directory.shared.ldap.ldif.LdapLdifException;
import org.apache.directory.shared.ldap.ldif.LdifEntry;
import org.apache.directory.shared.ldap.ldif.LdifReader;
import org.apache.directory.shared.ldap.schema.ldif.extractor.SchemaLdifExtractor;
import org.apache.directory.shared.ldap.schema.ldif.extractor.UniqueResourceException;
//import org.apache.directory.shared.ldap.schema.ldif.extractor.impl.ResourceMap;
import com.embed.ldap.service.ResourceMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


public class MyDefaultSchemaLdifExtractor implements SchemaLdifExtractor{
  private static final String BASE_PATH = "";
   
     private static final String SCHEMA_SUBDIR = "schema";
 
     private static final Logger LOG = LoggerFactory.getLogger( MyDefaultSchemaLdifExtractor.class );
 
     private boolean extracted;
  
     private File outputDirectory;
 
     private File schemaDirectory;
 
 
     /**
      * Creates an extractor which deposits files into the specified output
      * directory.
      *
      * @param outputDirectory the directory where the schema root is extracted
      */
     public MyDefaultSchemaLdifExtractor( File outputDirectory )
     {
         LOG.debug( "BASE_PATH set to {}, outputDirectory set to {}", BASE_PATH, outputDirectory );
         this.outputDirectory = outputDirectory;
         this.schemaDirectory = new File( outputDirectory, SCHEMA_SUBDIR );
 
         if ( ! outputDirectory.exists() )
         {
             LOG.debug( "Creating output directory: {}", outputDirectory );
             if( ! outputDirectory.mkdir() )
             {
                 LOG.error( "Failed to create outputDirectory: {}", outputDirectory );
             }
         }
         else
         {
             LOG.debug( "Output directory exists: no need to create." );
         }
 
         if ( ! schemaDirectory.exists() )
         {
          System.out.println("->>>> does NOT exist extracted set to false");
             LOG.info( "Schema directory '{}' does NOT exist: extracted state set to false.", schemaDirectory );
             extracted = false;
         }
         else
         {
          System.out.println("->>>> does NOT exist extracted set to true");
             LOG.info( "Schema directory '{}' does exist: extracted state set to true.", schemaDirectory );
             extracted = true;
         }
     }
 
 
     /**
      * Gets whether or not schema folder has been created or not.
      *
      * @return true if schema folder has already been extracted.
      */
     public boolean isExtracted()
     {
         return extracted;
     }
 
 
     /**
      * Extracts the LDIF files from a Jar file or copies exploded LDIF resources.
      *
      * @param overwrite over write extracted structure if true, false otherwise
      * @throws IOException if schema already extracted and on IO errors
      */
     public void extractOrCopy( boolean overwrite ) throws IOException
     {
      System.out.println("-->start extractOrCopy");
      System.out.println("--> outputDirectory.exists()" outputDirectory.exists());
      
         if ( ! outputDirectory.exists() )
         {
             outputDirectory.mkdir();
         }
 
         File schemaDirectory = new File( outputDirectory, SCHEMA_SUBDIR );
         System.out.println("--> schemaDirectory(getAbsolutePath)=" schemaDirectory.getAbsolutePath());
         System.out.println("--> schemaDirectory.exists()=" schemaDirectory.exists());
         
         if ( ! schemaDirectory.exists() )
         {
             schemaDirectory.mkdir();
         }
         else if ( ! overwrite )
         {
          System.out.println("-->  else if ( ! overwrite )");
             throw new IOException( I18n.err( I18n.ERR_08001, schemaDirectory.getAbsolutePath() ) );
         }
 
         Pattern pattern = Pattern.compile( ".*schema/ou=schema.*\\.ldif" );
         Map<String,Boolean> list = ResourceMap.getResources( pattern );
         System.out.println("ResourceMap size = " list.size());
         for ( Entry<String,Boolean> entry : list.entrySet() )
         {
          System.out.println("--> entry.getValue() =" entry.getValue());
             if ( entry.getValue() )
             {
              System.out.println("---> jar");
                 extractFromJar( entry.getKey() );
             }
             else
             {
              System.out.println("---> not jar");
                 File resource = new File( entry.getKey() );
                 copyFile( resource, getDestinationFile( resource ) );
             }
         }
     }
     
     
     /**
      * Extracts the LDIF files from a Jar file or copies exploded LDIF
      * resources without overwriting the resources if the schema has
      * already been extracted.
      *
      * @throws IOException if schema already extracted and on IO errors
      */
     public void extractOrCopy() throws IOException
     {
         extractOrCopy( false );
     }
     
     
     /**
      * Copies a file line by line from the source file argument to the 
      * destination file argument.
      *
      * @param source the source file to copy
      * @param destination the destination to copy the source to
      * @throws IOException if there are IO errors or the source does not exist
      */
     private void copyFile( File source, File destination ) throws IOException
     {
         LOG.debug( "copyFile(): source = {}, destination = {}", source, destination );
         
         if ( ! destination.getParentFile().exists() )
         {
             destination.getParentFile().mkdirs();
         }
         
         if ( ! source.getParentFile().exists() )
         {
             throw new FileNotFoundException( I18n.err( I18n.ERR_08002, source.getAbsolutePath() ) );
         }
         
         FileWriter out = new FileWriter( destination );
         
         try
         {
             LdifReader ldifReader = new LdifReader( source );
             boolean first = true;
             LdifEntry ldifEntry = null;
             
             while ( ldifReader.hasNext() )
             {
                 if ( first )
                 {
                     ldifEntry = ldifReader.next();
                     
                     if ( ldifEntry.get( SchemaConstants.ENTRY_UUID_AT ) == null )
                     {
                         // No UUID, let's create one
                         UUID entryUuid = UUID.randomUUID();
                         ldifEntry.addAttribute( SchemaConstants.ENTRY_UUID_AT, entryUuid.toString() );
                     }
                     
                     first = false;
                 }
                 else
                 {
                     // throw an exception : we should not have more than one entry per schema ldif file
                     String msg = I18n.err( I18n.ERR_08003, source );
                     LOG.error( msg );
                     throw new InvalidObjectException( msg );
                 }
             }
 
             ldifReader.close();
             
             // Add the version at the first line, to avoid a warning
             String ldifString = "version: 1\n"   ldifEntry.toString();
             
             out.write( ldifString );
             out.flush();
         }
         catch ( LdapLdifException ne )
         {
             // throw an exception : we should not have more than one entry per schema ldif file
             String msg = I18n.err( I18n.ERR_08004, source, ne.getLocalizedMessage() );
             LOG.error( msg );
             throw new InvalidObjectException( msg );
         }
         catch ( LdapException ne )
         {
             // throw an exception : we should not have more than one entry per schema ldif file
             String msg = I18n.err( I18n.ERR_08004, source, ne.getLocalizedMessage() );
             LOG.error( msg );
             throw new InvalidObjectException( msg );
         }
         finally
         {
             out.close();
         }
     }
 
     
     /**
      * Assembles the destination file by appending file components previously
      * pushed on the fileComponentStack argument.
      *
      * @param fileComponentStack stack containing pushed file components
      * @return the assembled destination file
      */
     private File assembleDestinationFile( Stack<String> fileComponentStack )
     {
         File destinationFile = outputDirectory.getAbsoluteFile();
         
         while ( ! fileComponentStack.isEmpty() )
         {
             destinationFile = new File( destinationFile, fileComponentStack.pop() );
         }
         
         return destinationFile;
     }
     
     
     /**
      * Calculates the destination file.
      *
      * @param resource the source file
      * @return the destination file's parent directory
      */
     private File getDestinationFile( File resource )
     {
         File parent = resource.getParentFile();
         Stack<String> fileComponentStack = new Stack<String>();
         fileComponentStack.push( resource.getName() );
         
         while ( parent != null )
         {
             if ( parent.getName().equals( "schema" ) )
             {
                 // All LDIF files besides the schema.ldif are under the 
                 // schema/schema base path. So we need to add one more 
                 // schema component to all LDIF files minus this schema.ldif
                 fileComponentStack.push( "schema" );
                 
                 return assembleDestinationFile( fileComponentStack );
             }
 
             fileComponentStack.push( parent.getName() );
             
             if ( parent.equals( parent.getParentFile() )
                     || parent.getParentFile() == null )
             {
                 throw new IllegalStateException( I18n.err( I18n.ERR_08005 ) );
             }
             
             parent = parent.getParentFile();
         }
 
         /*
 
            this seems retarded so I replaced it for now with what is below it
            will not break from loop above unless parent == null so the if is
            never executed - just the else is executed every time
 
         if ( parent != null )
         {
             return assembleDestinationFile( fileComponentStack );
         }
         else
         {
             throw new IllegalStateException( "parent cannot be null" );
         }
         
         */
 
         throw new IllegalStateException( I18n.err( I18n.ERR_08006 ) );
     }
     
     
     /**
      * Gets the DBFILE resource from within a jar off the base path.  If another jar
      * with such a DBFILE resource exists then an error will result since the resource
      * is not unique across all the jars.
      *
      * @param resourceName the file name of the resource to load
      * @param resourceDescription human description of the resource
      * @return the InputStream to read the contents of the resource
      * @throws IOException if there are problems reading or finding a unique copy of the resource
      */                                                                                                
     public static InputStream getUniqueResourceAsStream( String resourceName, String resourceDescription ) throws IOException
     {
         resourceName = BASE_PATH   resourceName;
         URL result = getUniqueResource( resourceName, resourceDescription );
         return result.openStream();
     }
 
 
     /**
      * Gets a unique resource from a Jar file.
      * 
      * @param resourceName the name of the resource
      * @param resourceDescription the description of the resource
      * @return the URL to the resource in the Jar file
      * @throws IOException if there is an IO error
      */
     public static URL getUniqueResource( String resourceName, String resourceDescription )
             throws IOException
     {
         Enumeration<URL> resources = MyDefaultSchemaLdifExtractor.class.getClassLoader().getResources( resourceName );
         if ( !resources.hasMoreElements() )
         {
             throw new UniqueResourceException( resourceName, resourceDescription );
         }
         URL result = resources.nextElement();
         if ( resources.hasMoreElements() )
         {
             throw new UniqueResourceException( resourceName, result, resources, resourceDescription);
         }
         return result;
     }
     
 
     /**
      * Extracts the LDIF schema resource from a Jar.
      *
      * @param resource the LDIF schema resource
      * @throws IOException if there are IO errors
      */
     private void extractFromJar( String resource ) throws IOException
     {
      System.out.println("-->extractFromJar resource =" resource);
         byte[] buf = new byte[512];
         InputStream in = MyDefaultSchemaLdifExtractor.getUniqueResourceAsStream( resource,
             "LDIF file in schema repository" );
 
         try
         {
             File destination = new File( outputDirectory, resource );
             System.out.println("--> destination =" destination.getAbsolutePath());
             /*
              * Do not overwrite an LDIF file if it has already been extracted.
              */
             if ( destination.exists() )
             {
              System.out.println("--> destination not exists.");
                 return;
             }
         
             if ( ! destination.getParentFile().exists() )
             {
                 destination.getParentFile().mkdirs();
             }
             
             FileOutputStream out = new FileOutputStream( destination );
             try
             {
                 while ( in.available() > 0 )
                 {
                     int readCount = in.read( buf );
                     out.write( buf, 0, readCount );
                 }
                 out.flush();
             } finally
             {
                 out.close();
             }
         }
         finally
         {
             in.close();
         }
     }
 }



然後在Bundle 的context-xml 設定相關的bean,init-method....
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd">

 <!-- regular spring configuration file defining the beans for this bundle. 
  The configuration of OSGi definitions is kept in a separate configuration 
  file so that this file can easily be used for integration testing outside 
  of an OSGi environment -->

 <bean id="actavitor" class="com.embed.ldap.service.MyActavitor"
  init-method="start" destroy-method="stop">
  <property name="embeddedAds" ref="embeddedADS" />
 </bean>

 <bean id="ctxRsLoader" class="com.embed.ldap.service.internal.ContextResourceLoader"></bean>

 <bean id="embeddedADS" class="com.embed.ldap.service.EmbeddedADSVer157">
  <constructor-arg>
   <ref bean="ctxRsLoader" />
  </constructor-arg>
  <constructor-arg type="java.lang.String">
   <value>classpath:META-INF/ldap/server-work/</value>
  </constructor-arg>
 </bean>


</beans>

再來設定Bundle的MANIFEST.MF檔
Manifest-Version: 1.0
Export-Package: com.embed.ldap.service;version="1.0.0";uses:="org.apac
 he.directory.server.core.partition",
 org.apache.directory.server.core;version="1.5.7",
 org.apache.directory.server.core.changelog;version="1.5.7",
 org.apache.directory.server.core.entry;version="1.5.7",
 org.apache.directory.server.core.partition;version="1.5.7",
 org.apache.directory.server.core.partition.impl.btree.jdbm;version="1.5.7",
 org.apache.directory.server.core.partition.ldif;version="1.5.7",
 org.apache.directory.server.core.schema;version="1.5.7",
 org.apache.directory.server.ldap;version="1.5.7",
 org.apache.directory.server.protocol.shared.transport;version="1.5.7",
 org.apache.directory.shared.ldap.entry,
 org.apache.directory.shared.ldap.exception,
 org.apache.directory.shared.ldap.name,
 org.apache.directory.shared.ldap.schema,
 org.apache.directory.shared.ldap.schema.ldif.extractor,
 org.apache.directory.shared.ldap.schema.ldif.extractor.impl,
 org.apache.directory.shared.ldap.schema.loader.ldif,
 org.apache.directory.shared.ldap.schema.manager.impl,
 org.apache.directory.shared.ldap.schema.registries
Build-Jdk: 1.6.0_24
Built-By: momo
Bundle-Version: 1.0.0
Tool: Bnd-1.15.0
Bnd-LastModified: 1302856707906
Bundle-Name: Spring OSGi Bundle
Bundle-ManifestVersion: 2
Bundle-ClassPath: .;/META-INF/lib/apacheds-all-1.5.7.jar
Created-By: Apache Maven Bundle Plugin
Bundle-SymbolicName: OSGiWebApp.embedLdap
Import-Package: javax.naming,
 javax.naming.directory,
 javax.naming.ldap,
 javax.net.ssl,
 javax.security.auth.x500,
 javax.security.sasl,
 org.apache.directory.server.core;version="[1.0.2,1.0.2]",
 org.apache.directory.server.core.changelog;version="1.5.7",
 org.apache.directory.server.core.entry;version="1.5.7",
 org.apache.directory.server.core.partition;version="1.5.7",
 org.apache.directory.server.core.partition.impl.btree.jdbm;version="1.5.7",
 org.apache.directory.server.core.partition.ldif;version="1.5.7",
 org.apache.directory.server.core.schema;version="1.5.7",
 org.apache.directory.server.ldap;version="1.5.7",
 org.apache.directory.server.protocol.shared.transport;version="1.5.7",
 org.apache.directory.shared.ldap.entry,
 org.apache.directory.shared.ldap.exception,
 org.apache.directory.shared.ldap.name,
 org.apache.directory.shared.ldap.schema,
 org.apache.directory.shared.ldap.schema.ldif.extractor,
 org.apache.directory.shared.ldap.schema.ldif.extractor.impl,
 org.apache.directory.shared.ldap.schema.loader.ldif,
 org.apache.directory.shared.ldap.schema.manager.impl,
 org.apache.directory.shared.ldap.schema.registries,
 org.slf4j;version="1.6.1",
 org.springframework.beans,
 org.springframework.context,
 org.springframework.core.io


檔案結構如下


* 這邊有一個問題就是EmbeddedADSVer157.java 在constructor 裡會設定workDir
workDir就是LDAP 相關檔案建立的位置, schema會從jar裡面拿出來,
解法是先在java.io.tmpdir 路徑裡先把ApacheDS的schema都先從jar裡先拿出來放
這樣在server start時就不會再去找jar file, 因為Bundle 的classpath 是沒辦法直接拿到實體檔
必須透過Bundle Context 取Resource or Spring 的Application Context 取Resource


System.getProperty( "java.io.tmpdir" ) + "/server-work" 
最主要是因為ApacheDS裡面的ResourceMap在getResources時取的classpath為
String classPath = System.getProperty( "java.class.path", "." );
在Eclispe Virgo 的話會取到C:\Eclipse-Virgo-WebServer\lib ......
所以上面才會用tempdir的路徑,eclipse virgo裡是C:\Eclipse-Virgo-WebServer\work\tmp

不然就必須去修改相關getResource的class, 跟OSGi Bundle的Resource match

2011年4月27日 星期三

Spring + Hibernate , Session & Transaction

Spring + Hibernate 整合的時候, 一般設計上會分Service Layer, Dao Layer, 在Service method 執行階段去設定Transaction , 這邊是有一種Project 在設計上造成Hibernate SessionException,
"Session was already closed"
出錯的問題在於DAO的實作都用this.getSession() ,(All Dao Layer class extends HibernateDaoSupport)
來取得hibernate Session ,但Service & DAO 的Reference 關系很復雜。
(Spring 的文件中提到, spring 的Session是一個thread-bound Session,它是和某個Thread绑定的,而這個Thread往往就是載入Servlet/Jsp的那的thread,實際的意思就是其生命周期scope是request/response的。
getSession取得了Hibernate的Session,這個Session可能是當前request中之前使用過的,也可能是一個新的,this.getSession()就有可能造成transaction 在commit的時候closed session出錯)


雖然繼承了HibernateDaoSupport這個類,但是this.getSession(),獲得的session也要在使用後關閉,因為這個session是原生的session不是經過sping代理過的,並且還沒有Transaction,自動提交,自動關閉連接等功能,所以使用使用getSession()獲得session時一定要關閉。

Hibernate documentation 提到的 Session Object
A Session is an inexpensive, non-threadsafe object that should be used once and then discarded for: a single request, a conversation or a single unit of work. A Session will not obtain a JDBC Connection, or a Datasource, unless it is needed. It will not consume any resources until used.
Do not use the session-per-operation antipattern,
The most common pattern in a multi-user client/server application is session-per-request.


HiberDaoSupport代碼
/**
  * Obtain a Hibernate Session, either from the current transaction or
  * a new one. The latter is only allowed if "allowCreate" is true.
  * <p><b>Note that this is not meant to be invoked from HibernateTemplate code
  * but rather just in plain Hibernate code.</b> Either rely on a thread-bound
  * Session or use it in combination with {@link #releaseSession}.
  * <p>In general, it is recommended to use
  * {@link #getHibernateTemplate() HibernateTemplate}, either with
  * the provided convenience operations or with a custom
  * {@link org.springframework.orm.hibernate3.HibernateCallback} that
  * provides you with a Session to work on. HibernateTemplate will care
  * for all resource management and for proper exception conversion.
  * @param allowCreate if a non-transactional Session should be created when no
  * transactional Session can be found for the current thread
  * @return the Hibernate Session
  * @throws DataAccessResourceFailureException if the Session couldn't be created
  * @throws IllegalStateException if no thread-bound Session found and allowCreate=false
  * @see org.springframework.orm.hibernate3.SessionFactoryUtils#getSession(SessionFactory, boolean)
  */
protected final Session getSession(boolean allowCreate)
     throws DataAccessResourceFailureException, IllegalStateException {

  // allowCreate=false: only return the thread-bound (transactional) Session,
  // failing with IllegalStateException when none is bound to the thread.
  // allowCreate=true: may open a new non-transactional Session, configured
  // with the template's entity interceptor and JDBC exception translator.
  return (!allowCreate ?
      SessionFactoryUtils.getSession(getSessionFactory(), false) :
    SessionFactoryUtils.getSession(
      getSessionFactory(),
      this.hibernateTemplate.getEntityInterceptor(),
      this.hibernateTemplate.getJdbcExceptionTranslator()));
 }

allowCreate default 數值為true , this.getSession()能從當前的事務或新的事務取得一個新的Hibernate session object,this.getHibernateTemplate().getSessionFactory().getCurrentSession()/openSession()則從spring中獲取session
getCurrentSession()創建的Session會綁定到當前的線程中去、而採用OpenSession()則不會。
採用getCurrentSession()創建的Session在commit或rollback後會自動關閉,採用OpenSession()必須手動關閉。



看看HibernateTemplate
public List find(final String queryString, final Object... values) throws DataAccessException {
  return executeWithNativeSession(new HibernateCallback<List>() {
   public List doInHibernate(Session session) throws HibernateException {
    Query queryObject = session.createQuery(queryString);
    prepareQuery(queryObject);
    if (values != null) {
     for (int i = 0; i < values.length; i  ) {
      queryObject.setParameter(i, values[i]);
     }
    }
    return queryObject.list();
   }
  });
 }


/**
  * Execute the action specified by the given action object within a
  * native {@link org.hibernate.Session}.
  * <p>This execute variant overrides the template-wide
  * {@link #isExposeNativeSession() "exposeNativeSession"} setting.
  * @param action callback object that specifies the Hibernate action
  * @return a result object returned by the action, or <code>null</code>
  * @throws org.springframework.dao.DataAccessException in case of Hibernate errors
  */
 public <T> T executeWithNativeSession(HibernateCallback<T> action) {
  // enforceNewSession=false (reuse a pre-bound transactional Session if any),
  // enforceNativeSession=true (hand the raw org.hibernate.Session to the
  // callback instead of the exposure proxy).
  return doExecute(action, false, true);
 }



/**
  * Execute the action specified by the given action object within a Session.
  * @param action callback object that specifies the Hibernate action
  * @param enforceNewSession whether to enforce a new Session for this template
  * even if there is a pre-bound transactional Session
  * @param enforceNativeSession whether to enforce exposure of the native
  * Hibernate Session to callback code
  * @return a result object returned by the action, or <code>null</code>
  * @throws org.springframework.dao.DataAccessException in case of Hibernate errors
  */
 protected <T> T doExecute(HibernateCallback<T> action, boolean enforceNewSession, boolean enforceNativeSession)
   throws DataAccessException {

  Assert.notNull(action, "Callback object must not be null");

  // Either force a brand-new Session, or obtain one via getSession() —
  // which may be the thread-bound transactional Session.
  Session session = (enforceNewSession ?
    SessionFactoryUtils.getNewSession(getSessionFactory(), getEntityInterceptor()) : getSession());
  // existingTransaction == true means the Session is pre-bound to the thread
  // and must NOT be closed by this template invocation.
  boolean existingTransaction = (!enforceNewSession &&
    (!isAllowCreate() || SessionFactoryUtils.isSessionTransactional(session, getSessionFactory())));
  if (existingTransaction) {
   logger.debug("Found thread-bound Session for HibernateTemplate");
  }

  FlushMode previousFlushMode = null;
  try {
   previousFlushMode = applyFlushMode(session, existingTransaction);
   enableFilters(session);
   // Wrap the Session in a proxy unless native exposure was requested.
   Session sessionToExpose =
     (enforceNativeSession || isExposeNativeSession() ? session : createSessionProxy(session));
   T result = action.doInHibernate(sessionToExpose);
   flushIfNecessary(session, existingTransaction);
   return result;
  }
  catch (HibernateException ex) {
   throw convertHibernateAccessException(ex);
  }
  catch (SQLException ex) {
   throw convertJdbcAccessException(ex);
  }
  catch (RuntimeException ex) {
   // Callback code threw application exception...
   throw ex;
  }
  finally {
   // Pre-bound Session: restore its state but leave it open for the
   // surrounding transaction; new Session: close (or defer-close) it here.
   if (existingTransaction) {
    logger.debug("Not closing pre-bound Hibernate Session after HibernateTemplate");
    disableFilters(session);
    if (previousFlushMode != null) {
     session.setFlushMode(previousFlushMode);
    }
   }
   else {
    // Never use deferred close for an explicitly new Session.
    if (isAlwaysUseNewSession()) {
     SessionFactoryUtils.closeSession(session);
    }
    else {
     SessionFactoryUtils.closeSessionOrRegisterDeferredClose(session, getSessionFactory());
    }
   }
  }
 }



再來重要的就是HibernateTransactionManager, 在doBegin 裡面, HibernateTransactionObject的一個實例,這個實例裡主要存放的就是sessionholder,sessionholder裡存放的就是開始事務的session and Transaction,如果之前沒有 sessionholder存放到thread,那麼這個 HibernateTransactionObject的實例的屬性其實是空的。
如果Transaction中並沒有存放sessionholder,那麼就新建一個session,放到新的sessionholder中,再放到HibernateTransactionObject的實例中。
如果給service設置聲明式Transaction,假設Transaction為required,然後一個service調用另一個service時,他們其實是共用一個session,原則是沒有就create,反之則不create,並返回之前已create的session和transaction。 也就是說spring通過threadlocal把session和對應的transaction放到線程之中,保證了在整個方法棧的任何一個地方都能得到同一個session和transaction。
所以如果你的方法在事務體之內,那麼你只要通過hibernatesupportdao或者hibernatetemplate來得到session的話,那這個session一定是開始事務的那個session,這個得到session的主要方法在SessionFactoryUtils裡。


HibernateTransactionManager
@Override
 protected void doBegin(Object transaction, TransactionDefinition definition) {
  HibernateTransactionObject txObject = (HibernateTransactionObject) transaction;

  if (txObject.hasConnectionHolder() && !txObject.getConnectionHolder().isSynchronizedWithTransaction()) {
   throw new IllegalTransactionStateException(
     "Pre-bound JDBC Connection found! HibernateTransactionManager does not support "  
     "running within DataSourceTransactionManager if told to manage the DataSource itself. "  
     "It is recommended to use a single HibernateTransactionManager for all transactions "  
     "on a single DataSource, no matter whether Hibernate or JDBC access.");
  }

  Session session = null;

  try {
   if (txObject.getSessionHolder() == null || txObject.getSessionHolder().isSynchronizedWithTransaction()) {
    Interceptor entityInterceptor = getEntityInterceptor();
    Session newSession = (entityInterceptor != null ?
      getSessionFactory().openSession(entityInterceptor) : getSessionFactory().openSession());
    if (logger.isDebugEnabled()) {
     logger.debug("Opened new Session ["   SessionFactoryUtils.toString(newSession)  
       "] for Hibernate transaction");
    }
    txObject.setSession(newSession);
   }

   session = txObject.getSessionHolder().getSession();

   if (this.prepareConnection && isSameConnectionForEntireSession(session)) {
    // We're allowed to change the transaction settings of the JDBC Connection.
    if (logger.isDebugEnabled()) {
     logger.debug(
       "Preparing JDBC Connection of Hibernate Session ["   SessionFactoryUtils.toString(session)   "]");
    }
    Connection con = session.connection();
    Integer previousIsolationLevel = DataSourceUtils.prepareConnectionForTransaction(con, definition);
    txObject.setPreviousIsolationLevel(previousIsolationLevel);
   }
   else {
    // Not allowed to change the transaction settings of the JDBC Connection.
    if (definition.getIsolationLevel() != TransactionDefinition.ISOLATION_DEFAULT) {
     // We should set a specific isolation level but are not allowed to...
     throw new InvalidIsolationLevelException(
       "HibernateTransactionManager is not allowed to support custom isolation levels: "  
       "make sure that its 'prepareConnection' flag is on (the default) and that the "  
       "Hibernate connection release mode is set to 'on_close' (SpringTransactionFactory's default). "  
       "Make sure that your LocalSessionFactoryBean actually uses SpringTransactionFactory: Your "  
       "Hibernate properties should *not* include a 'hibernate.transaction.factory_class' property!");
    }
    if (logger.isDebugEnabled()) {
     logger.debug(
       "Not preparing JDBC Connection of Hibernate Session ["   SessionFactoryUtils.toString(session)   "]");
    }
   }

   if (definition.isReadOnly() && txObject.isNewSession()) {
    // Just set to NEVER in case of a new Session for this transaction.
    session.setFlushMode(FlushMode.MANUAL);
   }

   if (!definition.isReadOnly() && !txObject.isNewSession()) {
    // We need AUTO or COMMIT for a non-read-only transaction.
    FlushMode flushMode = session.getFlushMode();
    if (flushMode.lessThan(FlushMode.COMMIT)) {
     session.setFlushMode(FlushMode.AUTO);
     txObject.getSessionHolder().setPreviousFlushMode(flushMode);
    }
   }

   Transaction hibTx;

   // Register transaction timeout.
   int timeout = determineTimeout(definition);
   if (timeout != TransactionDefinition.TIMEOUT_DEFAULT) {
    // Use Hibernate's own transaction timeout mechanism on Hibernate 3.1 
    // Applies to all statements, also to inserts, updates and deletes!
    hibTx = session.getTransaction();
    hibTx.setTimeout(timeout);
    hibTx.begin();
   }
   else {
    // Open a plain Hibernate transaction without specified timeout.
    hibTx = session.beginTransaction();
   }

   // Add the Hibernate transaction to the session holder.
   txObject.getSessionHolder().setTransaction(hibTx);

   // Register the Hibernate Session's JDBC Connection for the DataSource, if set.
   if (getDataSource() != null) {
    Connection con = session.connection();
    ConnectionHolder conHolder = new ConnectionHolder(con);
    if (timeout != TransactionDefinition.TIMEOUT_DEFAULT) {
     conHolder.setTimeoutInSeconds(timeout);
    }
    if (logger.isDebugEnabled()) {
     logger.debug("Exposing Hibernate transaction as JDBC transaction ["   con   "]");
    }
    TransactionSynchronizationManager.bindResource(getDataSource(), conHolder);
    txObject.setConnectionHolder(conHolder);
   }

   // Bind the session holder to the thread.
   if (txObject.isNewSessionHolder()) {
    TransactionSynchronizationManager.bindResource(getSessionFactory(), txObject.getSessionHolder());
   }
   txObject.getSessionHolder().setSynchronizedWithTransaction(true);
  }

  catch (Exception ex) {
   if (txObject.isNewSession()) {
    try {
     if (session.getTransaction().isActive()) {
      session.getTransaction().rollback();
     }
    }
    catch (Throwable ex2) {
     logger.debug("Could not rollback Session after failed transaction begin", ex);
    }
    finally {
     SessionFactoryUtils.closeSession(session);
    }
   }
   throw new CannotCreateTransactionException("Could not open Hibernate Session for transaction", ex);
  }
 }



結論:
Session的使用維護上有幾種:
1.getCurrentSession() : 獲得當前會話中的session,該session有容器自行維護管理,Spring可以代理事務。
2.this.getSession() : 從當前的執行中獲得或create 一個hibernate的session,自己關閉,釋放連接資源。
3.openSession(); 調用函數自行create一個數據庫的連接,並將其打開,在使用Spring操作非查詢語句的請況下,Spring的transaction 對該session對像不起到Transaction 管理的作用,所以該session對象應自己關閉,釋放連接資源。
當使用Spring 去管理Hibernate Session的時候,DAO Extends HibernateDaoSupport 時,session的取得我們並不在乎,如果需要取得session做處理時,HibernateTemplate提供HibernateCallback,就是为了满足使用了HibernateTemplate的情况下,仍然需要直接訪問Session的狀況。

個人認為Service Layer,DAO Layer所做的事應該單一化, Service 針對Method 做Transaction , 一個Method 代表的應該是一個完整的Transaction動作,要Reference其他Service所做的事時應該在Controll 呼叫2個不同的Service method。而DAO 所做的事應該是單純的DB動作。

Notes:

ServiceA 有個method 叫 find(id) , @Transactional(propagation=Propagation.REQUIRED)
ServiceB 也有個method 叫 find(id) , @Transactional(propagation=Propagation.REQUIRED)
然後在ServiceA 的find(id) 中 call ServiceB 的find(id)
Spring Log如下
Ts-[] ,15:24:47 [DEBUG] AbstractPlatformTransactionManager.java:370 - Creating new transaction with name [com.service.UserServiceImpl.find]: PROPAGATION_REQUIRED,ISOLATION_DEFAULT
  Ts-[] ,15:24:47 [DEBUG] HibernateTransactionManager.java:493 - Opened new Session [org.hibernate.impl.SessionImpl@940f82] for Hibernate transaction
  Ts-[] ,15:24:47 [DEBUG] HibernateTransactionManager.java:504 - Preparing JDBC Connection of Hibernate Session [org.hibernate.impl.SessionImpl@940f82]
  Ts-[] ,15:24:47 [DEBUG] DriverManagerDataSource.java:162 - Creating new JDBC DriverManager Connection to [jdbc:mysql://127.0.0.1:3306/test]
  Ts-[] ,15:24:47 [DEBUG] HibernateTransactionManager.java:569 - Exposing Hibernate transaction as JDBC transaction [com.mysql.jdbc.JDBC4Connection@92668c]
15:24:47 [DEBUG] HibernateTransactionManager.java:437 - Found thread-bound Session [org.hibernate.impl.SessionImpl@940f82] for Hibernate transaction
  Ts-[] ,15:24:47 [DEBUG] AbstractPlatformTransactionManager.java:468 - Participating in existing transaction
[DEBUG] AbstractPlatformTransactionManager.java:729 - Initiating transaction commit
  Ts-[] ,15:24:47 [DEBUG] HibernateTransactionManager.java:652 - Committing Hibernate transaction on Session [org.hibernate.impl.SessionImpl@940f82]
  Ts-[] ,15:24:47 [DEBUG] HibernateTransactionManager.java:734 - Closing Hibernate Session [org.hibernate.impl.SessionImpl@940f82] after transaction
  Ts-[] ,15:24:47 [DEBUG] SessionFactoryUtils.java:784 - Closing Hibernate Session
設為 REQUIRED - Support a current transaction, create a new one if none exists.
詳見Spring API doc





See also:

Hibernate Documentation

Spring Documentation

Tutorial:Create Spring 3 MVC Hibernate 3 Example

2011年4月25日 星期一

Java ClassLoader & OSGi ClassLoader

Java的classLoader在程式啟動之後,主要會有三個:Bootstrap Loader、ExtClassLoader與AppClassLoader。

Delegation Model
The class loaders are hierarchical in nature, they use delegation model when loading the class.
當classloader有類需要載入時先讓其parent搜尋其搜尋路徑幫忙載入,如果parent找不到,在由自己搜尋自己的搜尋路徑載入,如果自訂ClassLoader沒有改寫 loadClass method 的情況下,只有在一個 class 尚未被載入過,且parent class loader 也無法載入指定的 class,才會使用到 findClass method。
可參考:
Java Classloader Wiki
Understanding Java Classloaders
解讀ClassLoader


OSGi最常碰到的問題就是ClassLoader,OSGi中的每個Bundle 都有一個獨立的Class Loader,Bundle 之間通過不同的ClassLoader和在MANIFEST.MF 文件中定義的條件達到class的共享。


bundle之間類的共享:

通過export package的方式實現的,在bundle的manifest中通過制定export package的方式將特定的package與其他的bundle共享。

而引用其他bundle所暴露的package有兩種方式,第一是通過import package的方式,第二種是通過required bundle的方式

OSGi容器為每個Bundle建立不同的classloader,因此每個Bundle能訪問位於下列位置中的class:
a) 位於Java啟動classpath下的、所有以Java.*開頭的package class;
b) 位於OSGi框架classlpath下的class,通常有一個獨立的classloader負責加載框架的實現類及關鍵的interface class;
c) 位於Bundle空間中的class,這些類通常包含在與Bundle相關的jar文件中,以及加到這個Bundle中的其它jar包中的class。
d) import package中的class

實際運行環境中,Bundle 的Class Loader 根據如下規則去搜索類資源:
1:如Class Resource 屬於java.* 包,則將加載請求委託給父加載器
2:如Class Resource 定義在OSGi 框架中啟動委託列表(org.osgi.framework.bootdelegation)中,則將加載請求委託給父加載器
3:如Class Resource 屬於在Import-Package 中定義的包,則框架通過Class Loader 依賴關係圖找到導出此package的Bundle 的Class Loader,並將加載請求委託給此Class Loader
4:如Class Resource 屬於在Require-Bundle 中定義的Bundle,則框架通過Class Loader 依賴關係圖找到此Bundle 的Class Loader,將加載請求委託給此Class Loader ;
5:Bundle 搜索自己的類資源( 包括Bundle-Classpath 裡面定義的類路徑和屬於Bundle 的Fragment 的類資源)
6:若類在DynamicImport-Package 中定義,則開始嘗試在運行環境中尋找符合條件的Bundle 。

下圖是一個簡單的osgi classloader :

然後這個是一個完整的osgi classloader




建立一個Spring Dm bundle , 設定bean Start() method
<bean id="actavitor" class="com.my.loader.internal.Actavitor"
  init-method="start" destroy-method="stop">
</bean>

Actavitor.java
public void start() throws Exception {
  // Dump the bundle's class-loader hierarchy and the Spring DM
  // application-context loader for comparison.
  System.out.println("Hello Spring OSGi World!! ");

  Class<?> clazz = this.getClass();
  ClassLoader bundleLoader = clazz.getClassLoader();
  System.out.println("class->" + clazz);
  System.out.println("classLoader->" + bundleLoader);
  System.out.println("classLoader.getParent->" + bundleLoader.getParent());
  System.out.println("classLoader.getParent.getClass->" + bundleLoader.getParent().getClass());

  ClassLoader ctxLoader = applicationCtx.getClassLoader();
  System.out.println("applicationCtx.getClass ->" + applicationCtx.getClass());
  System.out.println("applicationCtx.getClassLoader ->" + ctxLoader);
  System.out.println("applicationCtx.getClassLoader.getClass ->" + ctxLoader.getClass());

 }

Result
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        Hello Spring OSGi World!!  
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        class->class com.my.loader.internal.Actavitor 
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        classLoader->KernelBundleClassLoader: [bundle=com.my.loader.BundleClassLoaderTester_1.0.0] 
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        classLoader.getParent->org.eclipse.osgi.launch.EquinoxFWClassLoader@1d77d9e 
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        classLoader.getParent.getClass->class org.eclipse.osgi.launch.EquinoxFWClassLoader 
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        applicationCtx.getClass ->class org.springframework.osgi.context.support.OsgiBundleXmlApplicationContext 
[2011-04-25 13:39:26.093] region-dm-11                 System.out                                                        applicationCtx.getClassLoader ->BundleDelegatingClassLoader for [Spring OSGi Bundle (com.my.loader.BundleClassLoaderTester)] 

Eclipse Virgo 的Bundle ClassLoader 為KernelBundleClassLoader
而KernelBundleClassLoader的parent為EquinoxFWClassLoader

Spring DM 會為每個bundle建立ApplicationContext , classloader透過BundleDelegatingClassLoader
BundleDelegatingClassLoader部份代碼
private final ClassLoader bridge;
.
.
/**
  * Private constructor.
  * 
  * Constructs a new <code>BundleDelegatingClassLoader</code> instance.
  * 
  * @param bundle
  * @param bridgeLoader
  */
 protected BundleDelegatingClassLoader(Bundle bundle, ClassLoader bridgeLoader) {
  super(null);  // no parent loader: all lookups go through the backing bundle / bridge
  Assert.notNull(bundle, "bundle should be non-null");
  this.backingBundle = bundle;
  this.bridge = bridgeLoader;
 }

 protected Class findClass(String name) throws ClassNotFoundException {
  try {
   return this.backingBundle.loadClass(name);
  }
  catch (ClassNotFoundException cnfe) {
   DebugUtils.debugClassLoading(backingBundle, name, null);
   throw new ClassNotFoundException(name   " not found from bundle ["   backingBundle.getSymbolicName()   "]",
    cnfe);
  }
  catch (NoClassDefFoundError ncdfe) {
   // This is almost always an error
   // This is caused by a dependent class failure,
   // so make sure we search for the right one.
   String cname = ncdfe.getMessage().replace('/', '.');
   DebugUtils.debugClassLoading(backingBundle, cname, name);
   NoClassDefFoundError e = new NoClassDefFoundError(name   " not found from bundle ["
       OsgiStringUtils.nullSafeNameAndSymName(backingBundle)   "]");
   e.initCause(ncdfe);
   throw e;
  }
 }
.
.
.
.

OSGi classLoader basic有提到 DefaultClassLoader. To get the allocated Bundle of a DefaultClassLoader we can call the getDelegate() Method.SpringDm 裡的BundleDelegatingClassLoader有設定bridge classloader, class loader adapter..主要還是透過org.osgi.framework.Bundle 的classloader來取的每個Bundle 的classloader。


Reference:
Basics about OSGi Classloading
http://codescale.wordpress.com/2009/05/22/basics-about-osgi-classloading/


Class Loader Architecture Comparison – Java, J2EE and OSGi
http://shylendrabhat.com/blog/2009/11/21/class-loader-architecture-comparison-java-j2ee-and-osgi/

classloader相關基礎知識
http://www.iteye.com/topic/25053

探索OSGi 框架的組件運行機制
http://www.ibm.com/developerworks/cn/java/j-lo-osgi/

深入了解Java ClassLoader、Bytecode 、ASM、cglib
http://www.iteye.com/topic/98178
http://blog.csdn.net/chief1985/archive/2009/08/12/4440398.aspx

2011年4月22日 星期五

OSGi - hot deployment

『即使你採用OSGi,但也不代表你的應用就具備了hot deployment的能力,在hot deployment上,完美的結果就是當更新完成後,新的執行請求就在新的代碼邏輯上正確的執行,就像沒發生過更新這回事樣』
  
某篇Blog看到的,在SpringDM + Equninox 中, 除了在Console 裡執行INSTALL,UPDATE,UNINSTALL,REFRESH,
4個指令可以達到針對Bundle的處理,相關在SpringDM的設定也有一些能達到。

  如果要update一個class,我們可以考慮Update整個bundle, 更新可以用update該bundle(在MANIFEST.MF中增加Bundle-UpdateLocation來指定Bundle更新時所使用的文件),或者是uninstall舊的,再install新的。
  Equinox中,當update一個Bundle時,如果這個Bundle中有Export package,此Bundle是singleton模式的話,在update後仍然保留了同樣的Bundle SymbolicName,是無法update成功的,會報出一個已經有相同的Singleton的Bundle存在的錯誤,因此update這種方法僅適用於沒有對外Export package的bundle,如bundle沒有對外Export package,Equinox則可正常的完成update過程。這又跟OSGi對於Install Update...等行為的classLoader有關。
OSGi實現hot deployment時,要合理的規劃系統,比較好的方法如:
   1: Interface & implements 分開
2: 盡量使用OSGi Service的方法, 而不是直接使用其Class方式

實際上在SpringDM(On Eclipse Virgo)上有一些方法可達到差不多的效果

我們有一個Bundle 內含一個Interface ,
com.xyz.MessageService 

用Spring DM 註冊一個OSGi Service時會這樣寫
<beans:bean id="beanToPublish" class="com.xyz.imp.MessageServiceImp"/>   
<service ref="beanToPublish" interface="com.xyz.MessageService"/>  
這時就會將一個OSGi Service register,
com.xyz.MessageService 
會publish 到osgi service上

如果有另一個Service定義如下
<beans:bean id="beanToPublish2" class="com.xyz.imp.MessageServiceImp2"/>   
<service ref="beanToPublish2" interface="com.xyz.MessageService"/>  

這時候如果有一個bundle reference 到這個osgi service 的interface 時會如何?
(如果這邊的2個Service 的interface 是自帶在自己的Bundle裡面的話,會被認為不同的Service)
<osgi:reference id="beanToReference" interface="com.xyz.MessageService"/>

這時會導入較早被啟動的bundle , OSGi在bundle 啟動時會給予一個ID值,愈早的值愈小。

如果我們要達成hot deploy 的方法, 在SpringDm裡面有一些方法能用
1: Service Ranking
使用OSGi Service下的 Ranking Attribute 時, rank 高的會被reference(相同interface impl的bundle) , 此時原本reference的bundle不需要再refresh or update , 直接hot deployed
<service ref="beanToBeExported" interface="com.xyz.MyServiceInterface"  ranking="9"/>

2: Bundle Object
在Spring Dm裡面使用osgi:bundle,可以建立一個bundle object, action設成Update,在Bundle被deploy時會觸發update的動作
<osgi:bundle id="aBundle" symbolic-name="OSGiWebApp.validateXML" action="update"/>

Mediator Pattern

最近在K OSGi 的部份, 練習寫一些bundle 的時候影響不大, 但是當整個Project 建立在OSGi 架構上,再變成一個Web Application Bundle 時就有一些問題會出現了,有一篇文章看了一下
Source : ht tp://wimpi.coalevo.net/2007/09/osgi-design-practice-loose-coupling.html

Mediator 的class diagram:


Mediator這個Pattern主要在解決交錯複雜的關係,常常我們程式會有一些複雜的關係存在,若彼此都互相緊密關聯,Mediator就扮演一個中介的角色,所有人只跟Mediator溝通,Mediator負責收集所有狀態的改變,並且通知所有需要牽動的人。Mediator 有點 Facade 和 Observer 的感覺, Mediator 就像是個 Observer ,每個元件都會把 Mediator 註冊到自己本身身上,當本身有變化時,就利用已註冊在自己身上的 Mediator 去通知自己已經變化了。

看了幾個Sample 之後, 發現最多的例子都是拿一個Swing 的元件, 比如說小算盤這種元件來當例子,
最簡單的實作就是

Mediator Interface
public interface Mediator {
   // Registers a colleague under the given name for later lookup.
   public void register(String name, Colleague c);
   // Routes a call from 'from' to the colleague registered under 'name'.
   public void call(String name, Colleague from);
}

Mediator Implements
/** Mediator that keeps a name-to-colleague registry and relays calls. */
public class ConcreteMediator implements Mediator {
    private Hashtable<String, Colleague> colleagues = new Hashtable<String, Colleague>();

    public void register(String name, Colleague c) {
        // Later registrations under the same name replace earlier ones.
        colleagues.put(name, c);
    }

    public void call(String name, Colleague from) {
        Colleague target = colleagues.get(name);
        if (target == null) {
            return;  // unknown name: silently ignore, as the original did
        }
        System.out.println(from.getName() + " call " + target.getName());
    }
}

Interface colleague
public interface Colleague {
   // Asks the mediator to contact the colleague registered under 'name'.
   public void call(String name);
   // Returns this colleague's registration name.
   public String getName();
}

Colleague Implements 1
/** Colleague "aa": self-registers with the mediator on construction. */
public class ConcreteColleague1 implements Colleague {
    private String name = "aa";
    private Mediator med;

    public ConcreteColleague1(Mediator med) {
        this.med = med;
        // Register under our fixed name so other colleagues can reach us.
        med.register(name, this);
    }

    public String getName() {
        return name;
    }

    public void call(String name) {
        // Delegate routing entirely to the mediator.
        med.call(name, this);
    }
}


Colleague Implements 2
/** Colleague "bb": self-registers with the mediator on construction. */
public class ConcreteColleague2 implements Colleague {
    private String name = "bb";
    private Mediator med;

    public ConcreteColleague2(Mediator med) {
        this.med = med;
        // Register under our fixed name so other colleagues can reach us.
        med.register(name, this);
    }

    public String getName() {
        return name;
    }

    public void call(String name) {
        // Delegate routing entirely to the mediator.
        med.call(name, this);
    }
}







Example Reference:
Wiki : http://en.wikipedia.org/wiki/Mediator_pattern
http://smartlife.blog.51cto.com/1146871/277270
http://caterpillar.onlyfun.net/Gossip/DesignPattern/MediatorPattern.htm
http://humank.wordpress.com/2011/04/19/gof-behavioral-mediator-pattern/
http://chenjumin.iteye.com/blog/640853

Test java code

Show Java code using SyntaxHighlighter on blogger :



/**
 * Scans every entry on java.class.path for resources matching the pattern,
 * delegating per-entry work to the three-argument overload.
 *
 * @param pattern regex to match resource names against
 * @return map of matched resource names to a boolean marker
 */
public static Map<String, Boolean> getResources(Pattern pattern) {
    HashMap<String, Boolean> matches = new HashMap<String, Boolean>();
    String classPath = System.getProperty("java.class.path", ".");
    System.out.println(">>>>>>>getResources classPath = "+ classPath);      
    String[] entries = classPath.split(File.pathSeparator);
    System.out.println(">>>>>>>classPathElements.length = "+ entries.length);
    for (String entry : entries) {
        getResources(matches, entry, pattern);
    }
    return matches;
}


Reference :
h ttp://sharedderrick.blogspot.com/2010/10/google-blogger-syntaxhighlighter-3083.html
h ttp://alexgorbatchev.com/SyntaxHighlighter/manual/brushes/java.html