//
// This Program is provided by Duke University and the authors as a service to the
// research community. It is provided without cost or restrictions, except for the
// User's acknowledgement that the Program is provided on an "As Is" basis and User
// understands that Duke University and the authors make no express or implied
// warranty of any kind. Duke University and the authors specifically disclaim any
// implied warranty or merchantability or fitness for a particular purpose, and make
// no representations or warranties that the Program will not infringe the
// intellectual property rights of others. The User agrees to indemnify and hold
// harmless Duke University and the authors from and against any and all liability
// arising out of User's use of the Program.
//
// lowMap.c
//
// Copyright 2005, Austin Eliazar, Ronald Parr, Duke University
//
// Code for generating and maintaining maps (for the low level of the hierarchy)
//
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <math.h>
#include <strings.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "lowMap.h"
// Unobserved grid squares are treated as having a prior of one stopped scan per
// 8 meters of laser scan.
#define L_PRIOR (-1.0/(MAP_SCALE*8.0))
// The strength of the prior is set as if 4 grid squares' worth of observation
// had already been made at the prior's density
#define L_PRIOR_DIST 4.0
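//
// Worked example (using a hypothetical MAP_SCALE of 10 grid squares per meter, for
// illustration only): L_PRIOR = -1.0/(10*8.0) = -0.0125, i.e. a prior density of one
// stopped scan per 80 grid squares, and that density carries the weight of
// L_PRIOR_DIST = 4 grid squares of virtual observation when combined with real
// laser measurements.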
// The global map for the low level, which contains all observations that any particle
// has made to any specific grid square.
PMapStarter lowMap[MAP_WIDTH][MAP_HEIGHT];
// The nodes of the ancestry tree are stored here. Since each particle has a unique ID,
// we can quickly access the particles via their ID in this array. See the structure
// TAncestor in map.h for more details.
TAncestor l_particleID[ID_NUMBER];
// Our current set of particles being processed by the particle filter
TParticle l_particle[PARTICLE_NUMBER];
// We like to keep track of exactly how many particles we are currently using.
int l_cur_particles_used;
int FLAG;
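//
// For orientation, a hedged sketch of the types used below, inferred only from how
// their fields are accessed in this file (the authoritative definitions live in
// map.h / lowMap.h and may contain additional fields):
//
//   TMapNode    - one observation of a grid square: ID (the observing particle's
//                 ancestor ID), source (index into that ancestor's mapEntries),
//                 parentGen, hits, distance
//   TMapStarter - the per-square dynamic array of observations: array (TMapNode *),
//                 size, total, dead; PMapStarter is a pointer to it, so NULL marks
//                 an unobserved square
//   TEntryList  - an ancestor's record of one observation it made: x, y (grid
//                 coordinates) and node (index into lowMap[x][y]->array)
//   TAncestor   - a node of the ancestry tree: mapEntries (array of TEntryList)
//                 and total (number of entries in use)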
//
// This function should be called at the start of each iteration of the slam process.
// It clears the observation cache so that it can be reloaded with the local maps
// for the new iteration.
//
void LowInitializeFlags()
{
// Each entry in observationArray corresponds to a single grid square in the global
// map. observationID is a count of how many of these entries there are. For each
// one of these entries, obsX/obsY represent its x,y coordinate in the global map.
// flagMap is an array the size of the global map, which gives a proper index into
// observationArray for that location. Therefore, we are resetting all non-zero
// entries of flagMap while resetting the arrays of obsX/obsY
while (observationID > 0) {
observationID--;
flagMap[obsX[observationID]][obsY[observationID]] = 0;
obsX[observationID] = 0;
obsY[observationID] = 0;
}
observationID = 1;
}
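//
// A hedged sketch (hypothetical helper code, not part of the original sources) of the
// registration step that LowInitializeFlags undoes: the first time an iteration
// touches a grid square, it claims the next observationID slot and records the
// square's coordinates so the reset loop above can find and clear it again.
//
//   if (flagMap[x][y] == 0) {          // first touch of this square this iteration
//     flagMap[x][y] = observationID;   // index of this square's cache entry
//     obsX[observationID] = x;         // remember the coordinates so that
//     obsY[observationID] = y;         // LowInitializeFlags can reset flagMap[x][y]
//     observationID++;
//   }
//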
//
// Initializes the lowMap and the observationArray.
//
void LowInitializeWorldMap()
{
int x, y;
for (y=0; y < MAP_HEIGHT; y++)
for (x=0; x < MAP_WIDTH; x++) {
// The map is a set of pointers. Null represents that it is unobserved.
lowMap[x][y] = NULL;
// flagMap is set to all zeros, indicating that location does not have an
// entry in the observationArray
flagMap[x][y] = 0;
}
// There are no entries in the observationArray yet, so obsX/obsY are set to 0
for (x=0; x < AREA; x++) {
obsX[x] = 0;
obsY[x] = 0;
}
// observationArray[0] is reserved as a constant for "unused". We start the
// array at 1.
observationID = 1;
}
//
// Frees up all of the memory being used by the map. Completely erases all info in
// that map, making it ready for another slam implementation. In hierarchical slam,
// this is called in between iterations of the high level slam, since each low level
// process runs essentially independently of previous low level processes.
//
void LowDestroyMap()
{
int x, y;
// Get rid of the old map.
for (y=0; y < MAP_HEIGHT; y++)
for (x=0; x < MAP_WIDTH; x++) {
while (lowMap[x][y] != NULL) {
free(lowMap[x][y]->array);
free(lowMap[x][y]);
lowMap[x][y] = NULL;
}
}
}
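//
// A hedged sketch of how these maintenance routines fit together in the hierarchical
// slam loop described above (the surrounding driver logic is hypothetical, not taken
// from the original sources):
//
//   LowInitializeWorldMap();        // once per low level process, before any scans
//   for each low level iteration:
//     LowInitializeFlags();         // clear the per-iteration observation cache
//     ... localize particles and update the map ...
//   LowDestroyMap();                // between iterations of the high level slam
//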
//
// Each grid square contains a dynamic array of the observations made at that grid
// square. Therefore, these arrays need to be resized occasionally. In the process
// of resizing the array, we also clean up any redundant or obsolete "dead" entries.
//
void LowResizeArray(TMapStarter *node, int deadID)
{
short int i, j, ID, x, y;
short int hash[ID_NUMBER];
int source, last;
TMapNode *temp;
// This is a special flag that can be raised when calling LowResizeArray, indicating
// that a specific ID is "dead". Currently this is only used when the ancestry tree
// is pruning off a dead branch, and the process of removing the corresponding
// observations leads to a reduction in the size of a dynamic array.
if (deadID >= 0)
node->dead++;
// Create a new array of the appropriate size.
// Don't count the dead entries in computing the new size
node->size = (int)(ceil((node->total - node->dead)*1.75));
temp = (TMapNode *) malloc(sizeof(TMapNode)*node->size);
if (temp == NULL) {
	fprintf(stderr, "Malloc failed in expansion of arrays. %d\n", node->size);
	exit(-1);
}
// Initialize our hash table.
for (i=0; i < ID_NUMBER; i++)
hash[i] = -1;
j = 0;
// Run through each entry in our old array of observations.
for (i=0; i < node->total; i++) {
if (node->array[i].ID == deadID) {
// Denote that this has been removed already. Therefore, we won't try to remove it later.
// We don't bother actually removing the source, since the only way that we can have a deadID is if we are in
// the process of removing all updates that deadID has made.
l_particleID[deadID].mapEntries[node->array[i].source].node = -1;
}
// This observation is the first one of this ID entered into the new array. Just copy it over, and note its position.
else if (hash[node->array[i].ID] == -1) {
// Copy the information into the new array.
temp[j].ID = node->array[i].ID;
temp[j].source = node->array[i].source;
temp[j].parentGen = node->array[i].parentGen;
temp[j].hits = node->array[i].hits;
temp[j].distance = node->array[i].distance;
// This entry is moving; alter its source to track it
l_particleID[ temp[j].ID ].mapEntries[ temp[j].source ].node = j;
// Note that an observation with this ID has already been entered into the new array, and where that was entered.
hash[node->array[i].ID] = j;
j++;
}
// There is already an entry in the new array with the same ID, and this current observation is
// actually more recent (as indicated by a greater accumulated laser scan distance). This current
// observation will replace the older one.
else if (node->array[i].distance > temp[hash[node->array[i].ID]].distance) {
// We set a couple of values to shorter variable names, in order to reduce indirection and make
// reading the code easier.
ID = node->array[i].ID; // The ID of the observations in conflict.
source = temp[hash[ID]].source; // The index of the existing entry in that ancestor's mapEntries
// Remove the source of the dead entry
l_particleID[ID].total--;
last = l_particleID[ID].total;
l_particleID[ID].mapEntries[source].x = l_particleID[ID].mapEntries[last].x;
l_particleID[ID].mapEntries[source].y = l_particleID[ID].mapEntries[last].y;
l_particleID[ID].mapEntries[source].node = l_particleID[ID].mapEntries[last].node;
// The last source entry was moved into this newly vacated position. Make sure that the
// observation it links to notes the new source position.
x = l_particleID[ID].mapEntries[source].x;
y = l_particleID[ID].mapEntries[source].y;
if ((lowMap[x][y] == node) && (l_particleID[ID].mapEntries[source].node < i))
temp[hash[ID]].source = source;
else
lowMap[x][y]->array[ l_particleID[ID].mapEntries[source].node ].source = source;
// Copy the more recent information into the slot previously held by the dead entry
temp[hash[ID]].source = node->array[i].source;
temp[hash[ID]].hits = node->array[i].hits;
temp[hash[ID]].distance = node->array[i].distance;
// We do not copy over the parentGen; we inherit it from the dead entry, since it was the predecessor.
// The ID does not need to be copied, since it was necessarily the same for both observations.
// This entry is moving; alter its source to track it
l_particleID[ID].mapEntries[ node->array[i].source ].node = hash[ID];
}
// There was already an entry for this ID. This new entry is an older form of the observation already recorded. Therefore,
// the new entry is dead, and should not be copied over, and its source in the ancestry tree should be removed.
else {
// The new entry is an older form of the one already entered; the surviving entry should inherit this older entry's parentGen
if (node->array[i].parentGen != -1)
temp[hash[node->array[i].ID]].parentGen = node->array[i].parentGen;
ID = node->array[i].ID;
source = node->array[i].source;
// Remove the source of the dead entry
l_particleID[ID].total--;
last = l_particleID[ID].total;
if (last != source) {
l_particleID[ID].mapEntries[source].x = l_particleID[ID].mapEntries[last].x;
l_particleID[ID].mapEntries[source].y = l_particleID[ID].mapEntries[last].y;
l_particleID[ID].mapEntries[source].node = l_particleID[ID].mapEntries[last].node;
// A source entry was moved. Make sure that the observation it links to notes the new source position.
x = l_particleID[ID].mapEntries[source].x;
y = l_particleID[ID].mapEntries[source].y;
if ((lowMap[x][y] == node) && (l_particleID[ID].mapEntries[source].node <= i))
temp[hash[ID]].source = source;
else
lowMap[x][y]->array[ l_particleID[ID].mapEntries[source].node ].source = source;
}
}
}
// Note the new total, which should be the previous total minus the dead entries.
node->total = j;
// After completing this process, we have removed all dead entries.
node->dead = 0;
free(node->array);
node->array = temp;
}
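//
// A hedged usage sketch (hypothetical caller, not from the original sources): the
// array is regrown when it fills up, and a negative deadID signals that no ancestor
// is being pruned, so only the ordinary dead entries are culled.
//
//   if (lowMap[x][y]->total == lowMap[x][y]->size)
//     LowResizeArray(lowMap[x][y], -1);
//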
//
// When we add a new entry to workingArray, there is a chance that we will run into a dead entry.
// If so, we will need to delete the dead entry, by copying the last entry in the array onto its
// location. We then need to recursively add the entry (that we just copied onto that spot) to
// the workingArray
//
static void AddToWorkingArray(int i, TMapStarter *node, short int workingArray[])
{
int j, source, last;
TEntryList *entries;
// Keep an eye out for dead entries. They will be made apparent when two entries both have the same ID.
if (workingArray[node->array[i].ID] == -1)
workingArray[node->array[i].ID] = i;
else {
// The node we are currently looking at is the dead one.
if (node->array[i].distance < node->array[ workingArray[node->array[i].ID] ].distance) {
// Remove the source, then remove the entry, and follow with a recursive call.
j = i;
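// NOTE: from here to the end of the function is a hedged reconstruction (the original
// listing is truncated at this point), following the comments above: remove the dead
// entry's source, copy the last entry of the map array onto the dead entry's slot, and
// recursively re-add the moved entry. This is an assumption, not the authors' code.
		}
		// Otherwise, the entry already recorded in workingArray is the dead one.
		// Record the newer entry i in its place, and mark the old index for removal.
		else {
			j = workingArray[node->array[i].ID];
			workingArray[node->array[i].ID] = i;
		}
		// Remove the dead entry's source from its ancestor, filling the vacated slot
		// with the ancestor's last source entry and fixing that entry's back-pointer.
		entries = l_particleID[node->array[j].ID].mapEntries;
		source = node->array[j].source;
		l_particleID[node->array[j].ID].total--;
		last = l_particleID[node->array[j].ID].total;
		if (source != last) {
			entries[source] = entries[last];
			lowMap[entries[source].x][entries[source].y]->array[entries[source].node].source = source;
		}
		// Delete the dead observation itself by copying the last entry of this grid
		// square's array onto its slot, then recursively re-add the moved entry.
		node->total--;
		if (j != node->total) {
			node->array[j] = node->array[node->total];
			l_particleID[node->array[j].ID].mapEntries[node->array[j].source].node = j;
			AddToWorkingArray(j, node, workingArray);
		}
	}
}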