Difference between revisions of "User:Strife Onizuka/String Tree"

From Second Life Wiki
Jump to navigation Jump to search
Line 1: Line 1:
This is just a bit of silliness; its sanity has not been vetted. The purpose of this code is to look at the costs of splitting a string into chunks of a specific size. The problem with getting all sequential chunks of a specific set size is that doing so is O(N<sup>2</sup>). This script hopes to reduce that. As previously stated, I have no idea if it does. It probably doesn't. I'm hoping for something like O(N*log2(N)).
This is just a bit of silliness; its sanity has not been vetted. The purpose of this code is to look at the costs of splitting a string into chunks of a specific size. The problem with getting all sequential chunks of a specific set size is that doing so is O(N<sup>2</sup>). This script hopes to reduce that. As previously stated, I have no idea if it does. It probably doesn't. I'm hoping for something like O(N*log2(N)).
Results: It works! At 25 chunks you break even between just doing flat llGetSubString vs this. Changing how split is done will improve that. Whether or not it's faster I don't know; I've been running this in LSLEditor. My goal has been to reduce read bottlenecks on the string.
<lsl>
<lsl>
integer chunk;
integer chunk = -1;
list buffer;
list buffer;
integer cost;
string getNext() {
string getNext() {
     string last = llList2String(buffer, -1);
     string last = llList2String(buffer, -1);
     buffer = llDeleteSubList(buffer, -1, -1);
     buffer = llDeleteSubList(buffer, -1, -1);
     integer size = llStringLength(last);
     integer size = llStringLength(last);
    cost += ((size + chunk - 1) / chunk);
     if(size > chunk) {
     if(size > chunk) {
         //The advantage of keeping the tree unbalanced this way is that split only needs to be cleverly calculated once.
         //The advantage of keeping the tree unbalanced this way is that split only needs to be cleverly calculated once.
         integer split = (llCeil(llLog(size / chunk) * 1.4426950408889634073599246810019) - 1) * chunk;
         integer split = (1 << (llCeil(llLog((size + chunk - 1) / chunk) * 1.4426950408889634073599246810019) - 1)) * chunk;
         do {
         do {
            cost += ((size + chunk - 1) / chunk) * 2;
             buffer += llGetSubString(last, split, -1);
             buffer += llGetSubString(last, split, -1);
             last = llDeleteSubString(last, split, -1);
             last = llDeleteSubString(last, split, -1);
Line 16: Line 22:
             split = split >> 1;
             split = split >> 1;
         } while(size > chunk);
         } while(size > chunk);
     }
     } else if (!size)
        chunk = -1;
     return last;
     return last;
}
}
 
setup(string str, integer size) {
setup(string str, integer size) {
    if(~chunk) llOwnerSay("Warning: Buffer may still be in use!");
     buffer = [str];
     buffer = [str];
     chunk = size;
     chunk = size;
    cost = 0;
    size = (llStringLength(str) + chunk - 1) / chunk;
    llOwnerSay("Flat: " + ((size * (size + 1)) / 2) + "  Chunks: " + chunk);
}
clear() {
    chunk = -1;
}
default
{
    state_entry()
    {
        setup("12341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234", 1);
        string value;
        while(value = getNext())
            ;//llOwnerSay(value);
        llOwnerSay("Cost: " + cost);
    }
}
}
</lsl>
</lsl>

Revision as of 11:36, 14 March 2014

This is just a bit of silliness; its sanity has not been vetted. The purpose of this code is to look at the costs of splitting a string into chunks of a specific size. The problem with getting all sequential chunks of a specific set size is that doing so is O(N<sup>2</sup>). This script hopes to reduce that. As previously stated, I have no idea if it does. It probably doesn't. I'm hoping for something like O(N*log2(N)).

Results: It works! At 25 chunks you break even between just doing flat llGetSubString vs this. Changing how split is done will improve that. Whether or not it's faster I don't know; I've been running this in LSLEditor. My goal has been to reduce read bottlenecks on the string. <lsl> integer chunk = -1; list buffer; integer cost;

// Pops the last entry off the global buffer list and returns its leading
// piece (at most chunk characters once the loop settles). Any remainder is
// pushed back onto buffer as progressively halved tail slices, so repeated
// calls hand the original string out chunk by chunk.
// Returns "" when buffer is empty (an empty-list pop — presumably "" per
// LSL semantics; confirm), at which point chunk is reset to the -1 sentinel
// that setup() checks.
// Side effects: mutates the globals buffer and chunk, and accumulates the
// simulated read cost into the global cost counter.
string getNext() {

   string last = llList2String(buffer, -1);
   buffer = llDeleteSubList(buffer, -1, -1);
   integer size = llStringLength(last);
   // Charge ceil(size / chunk) units: one per chunk-sized scan of this piece.
   cost += ((size + chunk - 1) / chunk);
   if(size > chunk) {
       //The advantage of keeping the tree unbalanced this way is that split only needs to be cleverly calculated once.
       // 1.4426950408889634... = 1/ln(2), so llLog(x) * it is log2(x).
       // split = chunk * 2^(ceil(log2(ceil(size/chunk))) - 1): half the
       // next power-of-two chunk count, in characters.
       integer split = (1 << (llCeil(llLog((size + chunk - 1) / chunk) * 1.4426950408889634073599246810019) - 1)) * chunk;
       do {
           // Each split reads the tail and rewrites the head: two scans.
           cost += ((size + chunk - 1) / chunk) * 2;
           buffer += llGetSubString(last, split, -1);
           last = llDeleteSubString(last, split, -1);
           size = split;
           split = split >> 1;
       } while(size > chunk);
   } else if (!size)
       chunk = -1;  // empty pop: the buffer is exhausted
   return last;

}

// Primes the chunking buffer with a fresh source string.
// str  - the string getNext() will hand out in chunk-sized pieces.
// size - the chunk length, stored in the global chunk.
// Resets the cost counter and announces the cost a naive flat
// llGetSubString loop would pay on the same string, for comparison.
setup(string str, integer size) {

   // chunk holds the -1 sentinel only after the previous buffer drained.
   if (chunk != -1) llOwnerSay("Warning: Buffer may still be in use!");
   chunk = size;
   cost = 0;
   buffer = [str];
   // Total chunk count, rounded up.
   integer total = (llStringLength(str) + chunk - 1) / chunk;
   llOwnerSay("Flat: " + ((total * (total + 1)) / 2) + "   Chunks: " + chunk);

}

// Abandons the current stream by resetting chunk to the -1 sentinel
// that setup() checks before warning about an in-use buffer.
// Note: the buffer list itself is left populated; only the sentinel
// is reset here.
clear() {

   chunk = -1;

}

// Benchmark driver: splits a 128-character test string into 1-character
// chunks and reports the accumulated simulated read cost.
default {

   state_entry()
   {
       setup("12341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234123412341234", 1);
       string value;
       // getNext() returns "" once the buffer is drained, ending the loop.
       while(value = getNext())
           ;//llOwnerSay(value);
       llOwnerSay("Cost: " + cost);
   }

} </lsl>