lxdream.org :: lxdream/src/gdrom/nrg.c :: diff
filename src/gdrom/nrg.c
changeset 736:a02d1475ccfd
prev 678:35eb00945316
next 832:40e5bb525c4e
author nkeynes
date Mon Jul 14 07:44:42 2008 +0000
permissions -rw-r--r--
last change Re-indent everything consistently
Fix include guards for consistency as well
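The commit message notes that include guards were also made consistent across the tree. As a point of reference, here is a minimal sketch of the kind of guard pattern such a cleanup typically standardises on; the lxdream_nrg_H macro name is only an assumed example and is not taken from this diff.

/* Hypothetical header skeleton illustrating a consistent include-guard
 * convention; the guard macro name is an assumption, not copied from
 * the lxdream tree. */
#ifndef lxdream_nrg_H
#define lxdream_nrg_H 1

/* ... declarations ... */

#endif /* !lxdream_nrg_H */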
1.1 --- a/src/gdrom/nrg.c Thu May 29 11:00:26 2008 +0000
1.2 +++ b/src/gdrom/nrg.c Mon Jul 14 07:44:42 2008 +0000
1.3 @@ -28,7 +28,7 @@
1.4 static gdrom_disc_t nrg_image_open( const gchar *filename, FILE *f );
1.5
1.6 struct gdrom_image_class nrg_image_class = { "Nero", "nrg",
1.7 - nrg_image_is_valid, nrg_image_open };
1.8 + nrg_image_is_valid, nrg_image_open };
1.9
1.10 #define NERO_V55_ID 0x4e455235
1.11 #define NERO_V50_ID 0x4e45524f
1.12 @@ -52,13 +52,13 @@
1.13
1.14 union nrg_footer {
1.15 struct nrg_footer_v50 {
1.16 - uint32_t dummy;
1.17 - uint32_t id;
1.18 - uint32_t offset;
1.19 + uint32_t dummy;
1.20 + uint32_t id;
1.21 + uint32_t offset;
1.22 } v50;
1.23 struct nrg_footer_v55 {
1.24 - uint32_t id;
1.25 - uint64_t offset;
1.26 + uint32_t id;
1.27 + uint64_t offset;
1.28 } v55;
1.29 };
1.30
1.31 @@ -98,13 +98,13 @@
1.32 uint8_t unknown[2]; /* always 01 01? */
1.33 uint8_t track_count;
1.34 struct nrg_daoi_track {
1.35 - char unknown[10];
1.36 - uint32_t sector_size __attribute__((packed)); /* Always 0? */
1.37 - uint8_t mode;
1.38 - uint8_t unknown2[3]; /* Always 00 00 01? */
1.39 - uint32_t pregap __attribute__((packed));
1.40 - uint32_t offset __attribute__((packed));
1.41 - uint32_t end __attribute__((packed));
1.42 + char unknown[10];
1.43 + uint32_t sector_size __attribute__((packed)); /* Always 0? */
1.44 + uint8_t mode;
1.45 + uint8_t unknown2[3]; /* Always 00 00 01? */
1.46 + uint32_t pregap __attribute__((packed));
1.47 + uint32_t offset __attribute__((packed));
1.48 + uint32_t end __attribute__((packed));
1.49 } track[0];
1.50 } __attribute__((packed));
1.51
1.52 @@ -115,13 +115,13 @@
1.53 uint8_t unknown[2]; /* always 01 01? */
1.54 uint8_t track_count;
1.55 struct nrg_daox_track {
1.56 - char unknown[10];
1.57 - uint32_t sector_size __attribute__((packed)); /* Always 0? */
1.58 - uint8_t mode;
1.59 - uint8_t unknown2[3]; /* Always 00 00 01? */
1.60 - uint64_t pregap __attribute__((packed));
1.61 - uint64_t offset __attribute__((packed));
1.62 - uint64_t end __attribute__((packed));
1.63 + char unknown[10];
1.64 + uint32_t sector_size __attribute__((packed)); /* Always 0? */
1.65 + uint8_t mode;
1.66 + uint8_t unknown2[3]; /* Always 00 00 01? */
1.67 + uint64_t pregap __attribute__((packed));
1.68 + uint64_t offset __attribute__((packed));
1.69 + uint64_t end __attribute__((packed));
1.70 } track[0];
1.71 } __attribute__((packed));
1.72
1.73 @@ -158,8 +158,8 @@
1.74 case 3: return GDROM_SEMIRAW_MODE2;
1.75 case 7: return GDROM_CDDA;
1.76 default:
1.77 - ERROR( "Unrecognized track mode %d in Nero image", mode );
1.78 - return -1;
1.79 + ERROR( "Unrecognized track mode %d in Nero image", mode );
1.80 + return -1;
1.81 }
1.82 }
1.83
1.84 @@ -170,10 +170,10 @@
1.85 fseek( f, -12, SEEK_END );
1.86 fread( &footer, sizeof(footer), 1, f );
1.87 if( GUINT32_FROM_BE(footer.v50.id) == NERO_V50_ID ||
1.88 - GUINT32_FROM_BE(footer.v55.id) == NERO_V55_ID ) {
1.89 - return TRUE;
1.90 + GUINT32_FROM_BE(footer.v55.id) == NERO_V55_ID ) {
1.91 + return TRUE;
1.92 } else {
1.93 - return FALSE;
1.94 + return FALSE;
1.95 }
1.96 }
1.97
1.98 @@ -198,151 +198,151 @@
1.99 fseek( f, -12, SEEK_END );
1.100 fread( &footer, sizeof(footer), 1, f );
1.101 if( GUINT32_FROM_BE(footer.v50.id) == NERO_V50_ID ) {
1.102 - INFO( "Loading Nero 5.0 image" );
1.103 - fseek( f, GUINT32_FROM_BE(footer.v50.offset), SEEK_SET );
1.104 + INFO( "Loading Nero 5.0 image" );
1.105 + fseek( f, GUINT32_FROM_BE(footer.v50.offset), SEEK_SET );
1.106 } else if( GUINT32_FROM_BE(footer.v55.id) == NERO_V55_ID ) {
1.107 - INFO( "Loading Nero 5.5+ image" );
1.108 - fseek( f, (uint32_t)GUINT64_FROM_BE(footer.v55.offset), SEEK_SET );
1.109 + INFO( "Loading Nero 5.5+ image" );
1.110 + fseek( f, (uint32_t)GUINT64_FROM_BE(footer.v55.offset), SEEK_SET );
1.111 } else {
1.112 - /* Not a (recognized) Nero image */
1.113 - return NULL;
1.114 + /* Not a (recognized) Nero image */
1.115 + return NULL;
1.116 }
1.117 -
1.118 +
1.119 disc = gdrom_image_new(filename, f);
1.120 if( disc == NULL ) {
1.121 - ERROR("Unable to allocate memory!");
1.122 - return NULL;
1.123 + ERROR("Unable to allocate memory!");
1.124 + return NULL;
1.125 }
1.126 image = (gdrom_image_t)disc;
1.127
1.128 do {
1.129 - fread( &chunk, sizeof(chunk), 1, f );
1.130 - chunk.length = GUINT32_FROM_BE(chunk.length);
1.131 - char data[chunk.length];
1.132 - fread( data, chunk.length, 1, f );
1.133 - chunk_id = GUINT32_FROM_BE(chunk.id);
1.134 - switch( chunk_id ) {
1.135 - case CUES_ID:
1.136 - case CUEX_ID:
1.137 - cue_track_id = track_id;
1.138 - cue_track_count = ((chunk.length / sizeof(struct nrg_cues)) >> 1) - 1;
1.139 - track_id += cue_track_count;
1.140 - for( i=0; i<chunk.length; i+= sizeof(struct nrg_cues) ) {
1.141 - struct nrg_cues *cue = (struct nrg_cues *)(data+i);
1.142 - int track = 0;
1.143 - uint32_t lba;
1.144 - if( chunk_id == CUEX_ID ) {
1.145 - lba = GUINT32_FROM_BE( cue->addr ) + GDROM_PREGAP;
1.146 - } else {
1.147 - lba = msf_to_lba( cue->addr );
1.148 - }
1.149 - if( cue->track == 0 )
1.150 - continue; /* Track 0. Leadin? always 0? */
1.151 - if( cue->track == 0xAA ) { /* end of disc */
1.152 - image->track[track_id-1].sector_count =
1.153 - lba - image->track[track_id-1].lba;
1.154 - } else {
1.155 - track = cue_track_id + bcd_to_uint8(cue->track) - 1;
1.156 - if( (cue->control & 0x01) == 0 ) {
1.157 - /* Pre-gap address. */
1.158 - if( track != 0 ) {
1.159 - image->track[track-1].sector_count =
1.160 - lba - image->track[track-1].lba;
1.161 - }
1.162 - } else { /* Track-start address */
1.163 - image->track[track].lba = lba;
1.164 - image->track[track].flags = cue->type;
1.165 - }
1.166 - }
1.167 - }
1.168 - break;
1.169 - case DAOI_ID:
1.170 - dao = (struct nrg_daoi *)data;
1.171 - memcpy( image->mcn, dao->mcn, 13 );
1.172 - image->mcn[13] = '\0';
1.173 - assert( dao->track_count * 30 + 22 == chunk.length );
1.174 - assert( dao->track_count == cue_track_count );
1.175 - for( i=0; i<dao->track_count; i++ ) {
1.176 - image->track[cue_track_id].sector_size = GUINT32_FROM_BE(dao->track[i].sector_size);
1.177 - image->track[cue_track_id].offset = GUINT32_FROM_BE(dao->track[i].offset);
1.178 - image->track[cue_track_id].mode = nrg_track_mode( dao->track[i].mode );
1.179 - image->track[cue_track_id].sector_count =
1.180 - (GUINT32_FROM_BE(dao->track[i].end) - GUINT32_FROM_BE(dao->track[i].offset))/
1.181 - GUINT32_FROM_BE(dao->track[i].sector_size);
1.182 - cue_track_id++;
1.183 - }
1.184 - break;
1.185 - case DAOX_ID:
1.186 - daox = (struct nrg_daox *)data;
1.187 - memcpy( image->mcn, daox->mcn, 13 );
1.188 - image->mcn[13] = '\0';
1.189 - assert( daox->track_count * 42 + 22 == chunk.length );
1.190 - assert( daox->track_count == cue_track_count );
1.191 - for( i=0; i<daox->track_count; i++ ) {
1.192 - image->track[cue_track_id].sector_size = GUINT32_FROM_BE(daox->track[i].sector_size);
1.193 - image->track[cue_track_id].offset = GUINT64_FROM_BE(daox->track[i].offset);
1.194 - image->track[cue_track_id].mode = nrg_track_mode( daox->track[i].mode );
1.195 - image->track[cue_track_id].sector_count =
1.196 - (GUINT64_FROM_BE(daox->track[i].end) - GUINT64_FROM_BE(daox->track[i].offset))/
1.197 - GUINT32_FROM_BE(daox->track[i].sector_size);
1.198 - cue_track_id++;
1.199 - }
1.200 - break;
1.201 -
1.202 - case SINF_ID:
1.203 - /* Data is a single 32-bit number representing number of tracks in session */
1.204 - i = GUINT32_FROM_BE( *(uint32_t *)data );
1.205 - while( i-- > 0 )
1.206 - image->track[session_track_id++].session = session_id;
1.207 - session_id++;
1.208 - break;
1.209 - case ETNF_ID:
1.210 - etnf = (struct nrg_etnf *)data;
1.211 - count = chunk.length / sizeof(struct nrg_etnf);
1.212 - for( i=0; i < count; i++, etnf++ ) {
1.213 - image->track[track_id].offset = GUINT32_FROM_BE(etnf->offset);
1.214 - image->track[track_id].lba = GUINT32_FROM_BE(etnf->lba) + (i+1)*GDROM_PREGAP;
1.215 - image->track[track_id].mode = nrg_track_mode( GUINT32_FROM_BE(etnf->mode) );
1.216 - if( image->track[track_id].mode == -1 ) {
1.217 - gdrom_image_destroy_no_close(disc);
1.218 - return NULL;
1.219 - }
1.220 - if( image->track[track_id].mode == GDROM_CDDA )
1.221 - image->track[track_id].flags = 0x01;
1.222 - else
1.223 - image->track[track_id].flags = 0x01 | TRACK_DATA;
1.224 - image->track[track_id].sector_size = GDROM_SECTOR_SIZE(image->track[track_id].mode);
1.225 - image->track[track_id].sector_count = GUINT32_FROM_BE(etnf->length) /
1.226 - image->track[track_id].sector_size;
1.227 - track_id++;
1.228 - }
1.229 - break;
1.230 - case ETN2_ID:
1.231 - etn2 = (struct nrg_etn2 *)data;
1.232 - count = chunk.length / sizeof(struct nrg_etn2);
1.233 - for( i=0; i < count; i++, etn2++ ) {
1.234 - image->track[track_id].offset = (uint32_t)GUINT64_FROM_BE(etn2->offset);
1.235 - image->track[track_id].lba = GUINT32_FROM_BE(etn2->lba) + (i+1)*GDROM_PREGAP;
1.236 - image->track[track_id].mode = nrg_track_mode( GUINT32_FROM_BE(etn2->mode) );
1.237 - if( image->track[track_id].mode == -1 ) {
1.238 - gdrom_image_destroy_no_close(disc);
1.239 - return NULL;
1.240 - }
1.241 - if( image->track[track_id].mode == GDROM_CDDA )
1.242 - image->track[track_id].flags = 0x01;
1.243 - else
1.244 - image->track[track_id].flags = 0x01 | TRACK_DATA;
1.245 - image->track[track_id].sector_size = GDROM_SECTOR_SIZE(image->track[track_id].mode);
1.246 - image->track[track_id].sector_count = (uint32_t)(GUINT64_FROM_BE(etn2->length) /
1.247 - image->track[track_id].sector_size);
1.248 - track_id++;
1.249 - }
1.250 - break;
1.251 + fread( &chunk, sizeof(chunk), 1, f );
1.252 + chunk.length = GUINT32_FROM_BE(chunk.length);
1.253 + char data[chunk.length];
1.254 + fread( data, chunk.length, 1, f );
1.255 + chunk_id = GUINT32_FROM_BE(chunk.id);
1.256 + switch( chunk_id ) {
1.257 + case CUES_ID:
1.258 + case CUEX_ID:
1.259 + cue_track_id = track_id;
1.260 + cue_track_count = ((chunk.length / sizeof(struct nrg_cues)) >> 1) - 1;
1.261 + track_id += cue_track_count;
1.262 + for( i=0; i<chunk.length; i+= sizeof(struct nrg_cues) ) {
1.263 + struct nrg_cues *cue = (struct nrg_cues *)(data+i);
1.264 + int track = 0;
1.265 + uint32_t lba;
1.266 + if( chunk_id == CUEX_ID ) {
1.267 + lba = GUINT32_FROM_BE( cue->addr ) + GDROM_PREGAP;
1.268 + } else {
1.269 + lba = msf_to_lba( cue->addr );
1.270 + }
1.271 + if( cue->track == 0 )
1.272 + continue; /* Track 0. Leadin? always 0? */
1.273 + if( cue->track == 0xAA ) { /* end of disc */
1.274 + image->track[track_id-1].sector_count =
1.275 + lba - image->track[track_id-1].lba;
1.276 + } else {
1.277 + track = cue_track_id + bcd_to_uint8(cue->track) - 1;
1.278 + if( (cue->control & 0x01) == 0 ) {
1.279 + /* Pre-gap address. */
1.280 + if( track != 0 ) {
1.281 + image->track[track-1].sector_count =
1.282 + lba - image->track[track-1].lba;
1.283 + }
1.284 + } else { /* Track-start address */
1.285 + image->track[track].lba = lba;
1.286 + image->track[track].flags = cue->type;
1.287 + }
1.288 + }
1.289 + }
1.290 + break;
1.291 + case DAOI_ID:
1.292 + dao = (struct nrg_daoi *)data;
1.293 + memcpy( image->mcn, dao->mcn, 13 );
1.294 + image->mcn[13] = '\0';
1.295 + assert( dao->track_count * 30 + 22 == chunk.length );
1.296 + assert( dao->track_count == cue_track_count );
1.297 + for( i=0; i<dao->track_count; i++ ) {
1.298 + image->track[cue_track_id].sector_size = GUINT32_FROM_BE(dao->track[i].sector_size);
1.299 + image->track[cue_track_id].offset = GUINT32_FROM_BE(dao->track[i].offset);
1.300 + image->track[cue_track_id].mode = nrg_track_mode( dao->track[i].mode );
1.301 + image->track[cue_track_id].sector_count =
1.302 + (GUINT32_FROM_BE(dao->track[i].end) - GUINT32_FROM_BE(dao->track[i].offset))/
1.303 + GUINT32_FROM_BE(dao->track[i].sector_size);
1.304 + cue_track_id++;
1.305 + }
1.306 + break;
1.307 + case DAOX_ID:
1.308 + daox = (struct nrg_daox *)data;
1.309 + memcpy( image->mcn, daox->mcn, 13 );
1.310 + image->mcn[13] = '\0';
1.311 + assert( daox->track_count * 42 + 22 == chunk.length );
1.312 + assert( daox->track_count == cue_track_count );
1.313 + for( i=0; i<daox->track_count; i++ ) {
1.314 + image->track[cue_track_id].sector_size = GUINT32_FROM_BE(daox->track[i].sector_size);
1.315 + image->track[cue_track_id].offset = GUINT64_FROM_BE(daox->track[i].offset);
1.316 + image->track[cue_track_id].mode = nrg_track_mode( daox->track[i].mode );
1.317 + image->track[cue_track_id].sector_count =
1.318 + (GUINT64_FROM_BE(daox->track[i].end) - GUINT64_FROM_BE(daox->track[i].offset))/
1.319 + GUINT32_FROM_BE(daox->track[i].sector_size);
1.320 + cue_track_id++;
1.321 + }
1.322 + break;
1.323
1.324 - case END_ID:
1.325 - end = TRUE;
1.326 - break;
1.327 - }
1.328 + case SINF_ID:
1.329 + /* Data is a single 32-bit number representing number of tracks in session */
1.330 + i = GUINT32_FROM_BE( *(uint32_t *)data );
1.331 + while( i-- > 0 )
1.332 + image->track[session_track_id++].session = session_id;
1.333 + session_id++;
1.334 + break;
1.335 + case ETNF_ID:
1.336 + etnf = (struct nrg_etnf *)data;
1.337 + count = chunk.length / sizeof(struct nrg_etnf);
1.338 + for( i=0; i < count; i++, etnf++ ) {
1.339 + image->track[track_id].offset = GUINT32_FROM_BE(etnf->offset);
1.340 + image->track[track_id].lba = GUINT32_FROM_BE(etnf->lba) + (i+1)*GDROM_PREGAP;
1.341 + image->track[track_id].mode = nrg_track_mode( GUINT32_FROM_BE(etnf->mode) );
1.342 + if( image->track[track_id].mode == -1 ) {
1.343 + gdrom_image_destroy_no_close(disc);
1.344 + return NULL;
1.345 + }
1.346 + if( image->track[track_id].mode == GDROM_CDDA )
1.347 + image->track[track_id].flags = 0x01;
1.348 + else
1.349 + image->track[track_id].flags = 0x01 | TRACK_DATA;
1.350 + image->track[track_id].sector_size = GDROM_SECTOR_SIZE(image->track[track_id].mode);
1.351 + image->track[track_id].sector_count = GUINT32_FROM_BE(etnf->length) /
1.352 + image->track[track_id].sector_size;
1.353 + track_id++;
1.354 + }
1.355 + break;
1.356 + case ETN2_ID:
1.357 + etn2 = (struct nrg_etn2 *)data;
1.358 + count = chunk.length / sizeof(struct nrg_etn2);
1.359 + for( i=0; i < count; i++, etn2++ ) {
1.360 + image->track[track_id].offset = (uint32_t)GUINT64_FROM_BE(etn2->offset);
1.361 + image->track[track_id].lba = GUINT32_FROM_BE(etn2->lba) + (i+1)*GDROM_PREGAP;
1.362 + image->track[track_id].mode = nrg_track_mode( GUINT32_FROM_BE(etn2->mode) );
1.363 + if( image->track[track_id].mode == -1 ) {
1.364 + gdrom_image_destroy_no_close(disc);
1.365 + return NULL;
1.366 + }
1.367 + if( image->track[track_id].mode == GDROM_CDDA )
1.368 + image->track[track_id].flags = 0x01;
1.369 + else
1.370 + image->track[track_id].flags = 0x01 | TRACK_DATA;
1.371 + image->track[track_id].sector_size = GDROM_SECTOR_SIZE(image->track[track_id].mode);
1.372 + image->track[track_id].sector_count = (uint32_t)(GUINT64_FROM_BE(etn2->length) /
1.373 + image->track[track_id].sector_size);
1.374 + track_id++;
1.375 + }
1.376 + break;
1.377 +
1.378 + case END_ID:
1.379 + end = TRUE;
1.380 + break;
1.381 + }
1.382 } while( !end );
1.383 image->track_count = track_id;
1.384 return disc;
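For readers skimming the re-indented hunks above, the format probe at the heart of nrg_image_is_valid() comes down to: seek to 12 bytes before the end of the file, read the footer, and accept the image if either the Nero 5.0 signature ("NERO", 0x4e45524f, at byte offset 4) or the Nero 5.5 signature ("NER5", 0x4e455235, at byte offset 0) matches after big-endian conversion. The following standalone sketch restates that check outside the gdrom_image framework; it assumes glib for GUINT32_FROM_BE and is an illustration, not part of the lxdream API.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <glib.h>

#define NERO_V55_ID 0x4e455235  /* "NER5" */
#define NERO_V50_ID 0x4e45524f  /* "NERO" */

/* Returns non-zero if the last 12 bytes of f carry a recognisable Nero
 * footer, mirroring the layout used in the diff above:
 *   v5.0: 4 dummy bytes, 4-byte id, 4-byte offset  (id at tail+4)
 *   v5.5: 4-byte id, 8-byte offset                 (id at tail+0)   */
static int looks_like_nrg( FILE *f )
{
    unsigned char tail[12];
    uint32_t id50, id55;
    if( fseek( f, -12, SEEK_END ) != 0 || fread( tail, 12, 1, f ) != 1 )
        return 0;
    memcpy( &id55, tail, 4 );
    memcpy( &id50, tail + 4, 4 );
    return GUINT32_FROM_BE(id50) == NERO_V50_ID ||
           GUINT32_FROM_BE(id55) == NERO_V55_ID;
}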