I am trying to add a step listener (an ItemWriteListener) to my annotation-based batch configuration. There are no errors at all, but the listener is never invoked. Why? It works in the old XML configuration style, but not when using annotations. Code below; the reader and processor implementations are omitted. In short: in Spring Batch, the ItemWriteListener is not registered and is therefore never called. Why?
@ImportResource({ "classpath*:transform-delegator-job.xml", "classpath:config/context.xml" })
@SpringBootApplication
public class SpringBootTransformDelegatorJobApplication {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private static final List<String> OVERRIDDEN_BY_EXPRESSION_LIST = null;
private static final String OVERRIDDEN_BY_EXPRESSION_STRING = null;
@Autowired
private JobBuilderFactory jobBuilders;
@Autowired
private StepBuilderFactory stepBuilders;
@Bean
public JobBuilderFactory jobBuilderFactory(JobRepository jobRepository) {
return new JobBuilderFactory(jobRepository);
}
@Bean
public StepBuilderFactory stepBuilderFactory(JobRepository jobRepository, PlatformTransactionManager transactionManager) {
return new StepBuilderFactory(jobRepository, transactionManager);
}
@Bean
@StepScope
public ItemWriter<Record> fileItemWriter(@Value("#{jobParameters['tews.customer.url']}") String url, @Value("#{jobParameters['tews.customer.user']}") String user,
@Value("#{jobParameters['tews.customer.pwd']}") String pwd) {
FileItemWriter writer = new FileItemWriter();
TewsClient client = TewsClientFactory.getInstance(user, pwd, url);
writer.setTewsClient(client);
writer.setHrObjectDao(hrObjectDao(OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING));
return writer;
}
@Bean
@StepScope
public FlatFileItemReader<FieldSet> reader(@Value("#{jobParameters['input.file.delimitter']}") String delimitter, @Value("#{jobParameters['input.file.names']}") String filePath,
@Value("#{jobParameters['input.file.encoding']}") String encoding) throws Exception {
FlatFileItemReader<FieldSet> reader = new FlatFileItemReader<FieldSet>();
PathResource pathResources = new PathResource(Paths.get(filePath));
Scanner scanner = new Scanner(pathResources.getInputStream());
String names = scanner.nextLine();
scanner.close();
DelimitedLineTokenizer delimitedLineTokenizer = new DelimitedLineTokenizer();
delimitedLineTokenizer.setNames(names.split(delimitter));
delimitedLineTokenizer.setDelimiter(delimitter);
DefaultLineMapper<FieldSet> defaultLineMapper = new DefaultLineMapper<FieldSet>();
defaultLineMapper.setLineTokenizer(delimitedLineTokenizer);
defaultLineMapper.setFieldSetMapper(new PassThroughFieldSetMapper());
reader.setLineMapper(defaultLineMapper);
reader.setLinesToSkip(1);
reader.setEncoding(encoding);
reader.afterPropertiesSet();
return reader;
}
@Bean
@StepScope
public ItemProcessor<FieldSet, Record> csvFeedValidateProcessor(@Value("#{jobParameters['input.file.imeconfig.path']}") String imeConfigPath) {
FieldCollectionConfiguration fieldCollectionConfiguration = null;
try {
XMLUnmarshaller<FieldcollectionType> unmarshaller = new XMLUnmarshaller<FieldcollectionType>();
fieldCollectionConfiguration = fieldCollectionBeanToModelTransform().transform(unmarshaller.unmarshallByFile(FieldcollectionType.class, new File(imeConfigPath)));
} catch (UnmarshallingException e) {
    logger.error("Could not unmarshal field collection configuration from " + imeConfigPath, e);
}
CsvFeedTransformProcessor csvFeedTransformProcessor = new CsvFeedTransformProcessor();
csvFeedTransformProcessor.setFieldCollectionConfiguration(fieldCollectionConfiguration);
return csvFeedTransformProcessor;
}
@Bean
@StepScope
public HRObjectDao hrObjectDao(@Value("#{jobParameters['ldap.customer.url']}") String url, @Value("#{jobParameters['ldap.customer.user']}") String user,
@Value("#{jobParameters['ldap.customer.pwd']}") String pwd, @Value("#{jobParameters['ldap.customer.bcontext']}") String bcontext) {
return new HRObjectDaoImpl(bcontext, url, user, pwd);
}
@Bean
public Transform<FieldcollectionType, FieldCollectionConfiguration> fieldCollectionBeanToModelTransform() {
return new FieldCollectionBeanToModelTransform();
}
@Bean
@StepScope
public MultiResourceItemReader<FieldSet> multiResourceReader(@Value("#{jobParameters['input.file.paths'].split(',')}") List<String> filePathList) throws Exception {
MultiResourceItemReader<FieldSet> multiResourceItemReader = new MultiResourceItemReader<FieldSet>();
multiResourceItemReader.setDelegate(reader(OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING));
PathResource[] pathResources = new PathResource[filePathList.size()];
for (int i = 0; i < filePathList.size(); i++) {
pathResources[i] = new PathResource(Paths.get(filePathList.get(i)));
}
multiResourceItemReader.setResources(pathResources);
return multiResourceItemReader;
}
@Bean
public JobParametersIncrementer jobParametersIncrementer() {
return new RunIdIncrementer();
}
@Bean
public Job job() throws Exception {
return jobBuilders.get("feedfiletransformer-delegate-job").listener(feedJobExecutionListener()).start(step1()).incrementer(jobParametersIncrementer()).build();
}
@Bean
public Step step1() throws Exception {
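    // Note: listener(..) is chained on the StepBuilder returned by get("step1"),
    // i.e. before chunk(..) switches the chain to the chunk-oriented SimpleStepBuilder.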
return stepBuilders.get("step1").listener(fileItemWriteListener(OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING)).<FieldSet, Record>chunk(1)
.reader(multiResourceReader(OVERRIDDEN_BY_EXPRESSION_LIST)).processor(csvFeedValidateProcessor(OVERRIDDEN_BY_EXPRESSION_STRING))
.writer(fileItemWriter(OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING, OVERRIDDEN_BY_EXPRESSION_STRING)).build();
}
@Bean
public FeedFileHandler feedFileHandler() {
return new FeedFileHandlerImpl();
}
@Bean
@StepScope
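    // @StepScope: instantiated once per step execution; at configuration time the
    // step builder sees this bean through a scoped proxy rather than the concrete class.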
public ItemWriteListener<Path> fileItemWriteListener(@Value("#{jobParameters['feeddumpDirPath']}") String feeddumpDirPath,
@Value("#{jobParameters['processedOkDirPath']}") String processedOkDirPath, @Value("#{jobParameters['processedFailedDirPath']}") String processedFailedDirPath) {
FileItemWriteListener fileItemWriteListener = new FileItemWriteListener();
fileItemWriteListener.setFeedProcessedFailedDirectory(processedFailedDirPath);
fileItemWriteListener.setFeedProcessedOkDirectory(processedOkDirPath);
fileItemWriteListener.setFeeddumpDirPath(feeddumpDirPath);
fileItemWriteListener.setFeedFileHandler(feedFileHandler());
fileItemWriteListener.setRetryLimit(0);
return fileItemWriteListener;
}
@Bean
public JobExecutionListener feedJobExecutionListener() {
return new FeedJobExecutionListener();
}
public static void main(String[] args) throws Exception {
SpringApplication.run(SpringBootTransformDelegatorJobApplication.class, args);
}
}
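For comparison, a minimal self-contained sketch (hypothetical names such as ListenerRegistrationSketch and sketchWriteListener; Spring Batch 4.x builder API assumed) of registering an ItemWriteListener after chunk(..), where SimpleStepBuilder exposes a listener(..) overload specific to ItemWriteListener:

// Hypothetical sketch, not the configuration above.
import org.springframework.batch.core.ItemWriteListener;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.item.ItemReader;
import org.springframework.batch.item.ItemWriter;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class ListenerRegistrationSketch {

    @Bean
    public Step sketchStep(StepBuilderFactory stepBuilders,
                           ItemReader<String> sketchReader,
                           ItemWriter<String> sketchWriter,
                           ItemWriteListener<String> sketchWriteListener) {
        return stepBuilders.get("sketchStep")
                // chunk(..) returns SimpleStepBuilder<String, String>, which has a
                // listener(ItemWriteListener<? super String>) overload in addition
                // to the listener(..) methods of the plain StepBuilder
                .<String, String>chunk(10)
                .reader(sketchReader)
                .writer(sketchWriter)
                .listener(sketchWriteListener)
                .build();
    }
}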
Have you added '@EnableBatchProcessing'? – leeor
Well, I had to avoid that annotation altogether; with it everything errors out and my job fails to start. My job works fine without it (reader, processor, writer, JobExecutionListener). It is only this ItemWriteListener that never gets called :( Should I post the whole code of this config class? – Elias
Yes, more of your configuration would help. Do you get any errors with more of the auto-configuration? – leeor
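As a follow-up to leeor's suggestion, a minimal sketch of what '@EnableBatchProcessing' could look like here (an assumption, not a verified fix for this job): the annotation itself contributes the JobBuilderFactory and StepBuilderFactory beans, so the hand-written factory @Bean methods above would be removed rather than kept alongside it:

// Hypothetical sketch; job/step/reader/writer/listener beans as in the question.
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@EnableBatchProcessing
@SpringBootApplication
public class SpringBootTransformDelegatorJobApplication {

    // Provided by @EnableBatchProcessing; no jobBuilderFactory()/stepBuilderFactory()
    // @Bean methods of our own.
    @Autowired
    private JobBuilderFactory jobBuilders;

    @Autowired
    private StepBuilderFactory stepBuilders;

    // remaining beans unchanged ...
}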